Symbol: bio
arch/m68k/emu/nfblock.c
60
static void nfhd_submit_bio(struct bio *bio)
arch/m68k/emu/nfblock.c
62
struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data;
arch/m68k/emu/nfblock.c
66
sector_t sec = bio->bi_iter.bi_sector;
arch/m68k/emu/nfblock.c
68
dir = bio_data_dir(bio);
arch/m68k/emu/nfblock.c
70
bio_for_each_segment(bvec, bio, iter) {
arch/m68k/emu/nfblock.c
77
bio_endio(bio);
arch/powerpc/platforms/pseries/papr_scm.c
92
struct bio *bio __maybe_unused)
arch/xtensa/platforms/iss/simdisk.c
104
static void simdisk_submit_bio(struct bio *bio)
arch/xtensa/platforms/iss/simdisk.c
106
struct simdisk *dev = bio->bi_bdev->bd_disk->private_data;
arch/xtensa/platforms/iss/simdisk.c
109
sector_t sector = bio->bi_iter.bi_sector;
arch/xtensa/platforms/iss/simdisk.c
111
bio_for_each_segment(bvec, bio, iter) {
arch/xtensa/platforms/iss/simdisk.c
116
bio_data_dir(bio) == WRITE);
arch/xtensa/platforms/iss/simdisk.c
121
bio_endio(bio);
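
The nfblock (m68k) and simdisk (xtensa) entries above are bio-based drivers: their ->submit_bio handlers read the starting sector from bio->bi_iter.bi_sector, walk the payload with bio_for_each_segment(), and complete with bio_endio(). A minimal sketch of that pattern; struct my_dev and dev_transfer() are hypothetical stand-ins for the device specifics:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/mm.h>

    struct my_dev;                                   /* hypothetical */
    static void dev_transfer(struct my_dev *dev, sector_t sec, void *buf,
                             unsigned int len, int dir); /* hypothetical */

    static void my_submit_bio(struct bio *bio)
    {
            struct my_dev *dev = bio->bi_bdev->bd_disk->private_data;
            sector_t sec = bio->bi_iter.bi_sector;
            int dir = bio_data_dir(bio);             /* READ or WRITE */
            struct bvec_iter iter;
            struct bio_vec bvec;

            bio_for_each_segment(bvec, bio, iter) {
                    /* Segments of a block bio are sector-sized multiples. */
                    dev_transfer(dev, sec,
                                 page_address(bvec.bv_page) + bvec.bv_offset,
                                 bvec.bv_len, dir);
                    sec += bvec.bv_len >> SECTOR_SHIFT;
            }
            bio_endio(bio);                          /* success: bi_status stays 0 */
    }
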
block/bfq-cgroup.c
1407
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
block/bfq-cgroup.c
1414
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
block/bfq-cgroup.c
345
struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
block/bfq-cgroup.c
599
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
block/bfq-cgroup.c
601
struct blkcg_gq *blkg = bio->bi_blkg;
block/bfq-cgroup.c
611
bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
block/bfq-cgroup.c
616
bio_associate_blkg_from_css(bio,
block/bfq-cgroup.c
774
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
block/bfq-cgroup.c
777
struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
block/bfq-iosched.c
1794
static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio)
block/bfq-iosched.c
1804
end = bio_end_sector(bio) - 1;
block/bfq-iosched.c
1837
unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
block/bfq-iosched.c
2362
struct bio *bio,
block/bfq-iosched.c
2369
return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
block/bfq-iosched.c
2447
static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
block/bfq-iosched.c
2462
bfq_bic_update_cgroup(bic, bio);
block/bfq-iosched.c
2464
bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf),
block/bfq-iosched.c
2465
bfq_actuator_index(bfqd, bio));
block/bfq-iosched.c
2471
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
block/bfq-iosched.c
2481
struct bio *bio)
block/bfq-iosched.c
2486
__rq = bfq_find_rq_fmerge(bfqd, bio, q);
block/bfq-iosched.c
2487
if (__rq && elv_bio_merge_ok(__rq, bio)) {
block/bfq-iosched.c
2666
return ((struct bio *)io_struct)->bi_iter.bi_sector;
block/bfq-iosched.c
3231
struct bio *bio)
block/bfq-iosched.c
3234
bool is_sync = op_is_sync(bio->bi_opf);
block/bfq-iosched.c
3254
new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
block/bfq-iosched.c
5556
struct bio *bio, bool is_sync,
block/bfq-iosched.c
5560
static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
block/bfq-iosched.c
5575
bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio));
block/bfq-iosched.c
5579
bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
block/bfq-iosched.c
5580
bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio));
block/bfq-iosched.c
5584
bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio));
block/bfq-iosched.c
5830
struct bio *bio, bool is_sync,
block/bfq-iosched.c
5840
bfqg = bfq_bio_bfqg(bfqd, bio);
block/bfq-iosched.c
5844
bfq_actuator_index(bfqd, bio));
block/bfq-iosched.c
5855
is_sync, bfq_actuator_index(bfqd, bio));
block/bfq-iosched.c
6172
bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
block/bfq-iosched.c
6247
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
block/bfq-iosched.c
6738
struct bio *bio, bool split, bool is_sync,
block/bfq-iosched.c
6741
unsigned int act_idx = bfq_actuator_index(bfqd, bio);
block/bfq-iosched.c
6753
bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
block/bfq-iosched.c
6853
struct bio *bio,
block/bfq-iosched.c
6861
bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
block/bfq-iosched.c
6883
bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL);
block/bfq-iosched.c
6930
struct bio *bio = rq->bio;
block/bfq-iosched.c
6935
unsigned int a_idx = bfq_actuator_index(bfqd, bio);
block/bfq-iosched.c
6951
bfq_check_ioprio_change(bic, bio);
block/bfq-iosched.c
6952
bfq_bic_update_cgroup(bic, bio);
block/bfq-iosched.c
6953
bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, a_idx, is_sync);
block/bfq-iosched.h
1085
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
block/bfq-iosched.h
1087
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
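
Most bfq lookups above key off two bio-derived values: op_is_sync(bio->bi_opf) and the end position bio_end_sector(bio), which feeds the elv_rb_find() back-merge search at bfq-iosched.c:2369. The underlying adjacency tests, sketched with the generic request accessors rather than bfq internals (illustrative only):

    #include <linux/bio.h>
    #include <linux/blk-mq.h>

    /* Back merge: the request ends exactly where the bio begins. */
    static bool back_mergeable(struct request *rq, struct bio *bio)
    {
            return blk_rq_pos(rq) + blk_rq_sectors(rq) ==
                   bio->bi_iter.bi_sector;
    }

    /* Front merge: the bio ends exactly where the request begins. */
    static bool front_mergeable(struct request *rq, struct bio *bio)
    {
            return bio_end_sector(bio) == blk_rq_pos(rq);
    }
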
block/bio-integrity-auto.c
105
if (!bio_sectors(bio))
block/bio-integrity-auto.c
109
if (bio_integrity(bio))
block/bio-integrity-auto.c
112
switch (bio_op(bio)) {
block/bio-integrity-auto.c
138
if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
block/bio-integrity-auto.c
142
bio_integrity_init(bio, &bid->bip, &bid->bvec, 1);
block/bio-integrity-auto.c
143
bid->bio = bio;
block/bio-integrity-auto.c
145
bio_integrity_alloc_buf(bio, gfp & __GFP_ZERO);
block/bio-integrity-auto.c
147
bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);
block/bio-integrity-auto.c
159
if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))
block/bio-integrity-auto.c
160
blk_integrity_generate(bio);
block/bio-integrity-auto.c
162
bid->saved_bio_iter = bio->bi_iter;
block/bio-integrity-auto.c
17
struct bio *bio;
block/bio-integrity-auto.c
30
bid->bio->bi_integrity = NULL;
block/bio-integrity-auto.c
31
bid->bio->bi_opf &= ~REQ_INTEGRITY;
block/bio-integrity-auto.c
40
struct bio *bio = bid->bio;
block/bio-integrity-auto.c
42
blk_integrity_verify_iter(bio, &bid->saved_bio_iter);
block/bio-integrity-auto.c
44
bio_endio(bio);
block/bio-integrity-auto.c
67
bool __bio_integrity_endio(struct bio *bio)
block/bio-integrity-auto.c
69
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity-auto.c
73
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
block/bio-integrity-auto.c
95
bool bio_integrity_prep(struct bio *bio)
block/bio-integrity-auto.c
97
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/bio-integrity.c
103
bio_integrity_init(bio, &bia->bip, bia->bvecs, nr_vecs);
block/bio-integrity.c
138
void bio_integrity_unmap_user(struct bio *bio)
block/bio-integrity.c
140
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity.c
143
if (bio_data_dir(bio) == READ)
block/bio-integrity.c
161
int bio_integrity_add_page(struct bio *bio, struct page *page,
block/bio-integrity.c
164
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/bio-integrity.c
165
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity.c
19
void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer)
block/bio-integrity.c
198
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
block/bio-integrity.c
201
bool write = op_is_write(bio_op(bio));
block/bio-integrity.c
21
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/bio-integrity.c
218
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
block/bio-integrity.c
22
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity.c
226
bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
block/bio-integrity.c
23
unsigned int len = bio_integrity_bytes(bi, bio_sectors(bio));
block/bio-integrity.c
239
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
block/bio-integrity.c
250
bio_integrity_free(bio);
block/bio-integrity.c
256
static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
block/bio-integrity.c
261
bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
block/bio-integrity.c
305
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
block/bio-integrity.c
307
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/bio-integrity.c
316
if (bio_integrity(bio))
block/bio-integrity.c
349
bio->bi_opf |= REQ_NOMERGE;
block/bio-integrity.c
352
ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes);
block/bio-integrity.c
354
ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
block/bio-integrity.c
370
static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
block/bio-integrity.c
372
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity.c
384
int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
block/bio-integrity.c
386
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/bio-integrity.c
398
integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
block/bio-integrity.c
409
ret = bio_integrity_map_user(bio, &it);
block/bio-integrity.c
411
bio_uio_meta_to_bip(bio, meta);
block/bio-integrity.c
412
bip_set_seed(bio_integrity(bio), meta->seed);
block/bio-integrity.c
414
meta->seed += bio_integrity_intervals(bi, bio_sectors(bio));
block/bio-integrity.c
428
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
block/bio-integrity.c
430
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity.c
431
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/bio-integrity.c
444
void bio_integrity_trim(struct bio *bio)
block/bio-integrity.c
446
struct bio_integrity_payload *bip = bio_integrity(bio);
block/bio-integrity.c
447
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/bio-integrity.c
449
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
block/bio-integrity.c
461
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
block/bio-integrity.c
469
bip = bio_integrity_alloc(bio, gfp_mask, 0);
block/bio-integrity.c
62
void bio_integrity_free(struct bio *bio)
block/bio-integrity.c
64
kfree(bio_integrity(bio));
block/bio-integrity.c
65
bio->bi_integrity = NULL;
block/bio-integrity.c
66
bio->bi_opf &= ~REQ_INTEGRITY;
block/bio-integrity.c
69
void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
block/bio-integrity.c
77
bio->bi_integrity = bip;
block/bio-integrity.c
78
bio->bi_opf |= REQ_INTEGRITY;
block/bio-integrity.c
91
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
block/bio-integrity.c
97
if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
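
bio_integrity_alloc() plus bio_integrity_add_page() is the manual counterpart to the auto-generation in bio-integrity-auto.c: the caller supplies its own protection-information buffer. A sketch, assuming a caller-provided, pre-filled pi_buf of pi_len bytes (both hypothetical):

    #include <linux/bio.h>
    #include <linux/err.h>
    #include <linux/mm.h>

    static int attach_pi(struct bio *bio, void *pi_buf, unsigned int pi_len)
    {
            struct bio_integrity_payload *bip;

            bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
            if (IS_ERR(bip))
                    return PTR_ERR(bip);

            /* Like bio_add_page(), returns the number of bytes added. */
            if (bio_integrity_add_page(bio, virt_to_page(pi_buf), pi_len,
                                       offset_in_page(pi_buf)) < pi_len)
                    return -ENOMEM;
            return 0;
    }
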
block/bio.c
1021
void __bio_add_page(struct bio *bio, struct page *page,
block/bio.c
1024
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
block/bio.c
1025
WARN_ON_ONCE(bio_full(bio, len));
block/bio.c
1028
bio->bi_opf |= REQ_NOMERGE;
block/bio.c
1030
bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
block/bio.c
1031
bio->bi_iter.bi_size += len;
block/bio.c
1032
bio->bi_vcnt++;
block/bio.c
1046
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
block/bio.c
1048
__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
block/bio.c
1062
int bio_add_page(struct bio *bio, struct page *page,
block/bio.c
1065
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
block/bio.c
1067
if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
block/bio.c
1070
if (bio->bi_vcnt > 0) {
block/bio.c
1071
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
block/bio.c
1077
bio->bi_iter.bi_size += len;
block/bio.c
1082
if (bio->bi_vcnt >= bio->bi_max_vecs)
block/bio.c
1084
__bio_add_page(bio, page, len, offset);
block/bio.c
1089
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
block/bio.c
1095
__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
block/bio.c
1113
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
block/bio.c
1120
return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
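
Note the asymmetry between the two variants above: __bio_add_page() (bio.c:1021) asserts there is room and never fails, while bio_add_page() (bio.c:1062) returns the number of bytes added, i.e. len on success and 0 once the bio is full, which is why bio_add_folio() at bio.c:1120 tests "> 0". A typical fill loop, with pages[]/npages as hypothetical inputs:

    #include <linux/bio.h>

    static struct bio *build_read_bio(struct block_device *bdev,
                                      sector_t sector, struct page **pages,
                                      unsigned int npages)
    {
            struct bio *bio;
            unsigned int i;

            /* GFP_NOIO can block but does not fail for bio_alloc(). */
            bio = bio_alloc(bdev, npages, REQ_OP_READ, GFP_NOIO);
            bio->bi_iter.bi_sector = sector;

            for (i = 0; i < npages; i++) {
                    /* 0 bytes added means the bio is full: stop here. */
                    if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                            break;
            }
            return bio;
    }
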
block/bio.c
1138
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
block/bio.c
114
return bs->front_pad + sizeof(struct bio) + bs->back_pad;
block/bio.c
1143
if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
block/bio.c
1145
if (op_is_write(bio_op(bio)))
block/bio.c
1164
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
block/bio.c
1167
unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
block/bio.c
1179
void __bio_release_pages(struct bio *bio, bool mark_dirty)
block/bio.c
1183
bio_for_each_folio_all(fi, bio) {
block/bio.c
1198
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
block/bio.c
1200
WARN_ON_ONCE(bio->bi_max_vecs);
block/bio.c
1202
bio->bi_io_vec = (struct bio_vec *)iter->bvec;
block/bio.c
1203
bio->bi_iter.bi_idx = 0;
block/bio.c
1204
bio->bi_iter.bi_bvec_done = iter->iov_offset;
block/bio.c
1205
bio->bi_iter.bi_size = iov_iter_count(iter);
block/bio.c
1206
bio_set_flag(bio, BIO_CLONED);
block/bio.c
1214
static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
block/bio.c
1217
size_t nbytes = bio->bi_iter.bi_size & len_align_mask;
block/bio.c
1223
bio->bi_iter.bi_size -= nbytes;
block/bio.c
1225
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
block/bio.c
1232
if (bio_flagged(bio, BIO_PAGE_PINNED))
block/bio.c
1235
bio->bi_vcnt--;
block/bio.c
1239
if (!bio->bi_vcnt)
block/bio.c
1265
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
block/bio.c
1270
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
block/bio.c
1274
bio_iov_bvec_set(bio, iter);
block/bio.c
1275
iov_iter_advance(iter, bio->bi_iter.bi_size);
block/bio.c
1280
bio_set_flag(bio, BIO_PAGE_PINNED);
block/bio.c
1281
if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
block/bio.c
1287
ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec,
block/bio.c
1288
BIO_MAX_SIZE - bio->bi_iter.bi_size,
block/bio.c
1289
&bio->bi_vcnt, bio->bi_max_vecs, flags);
block/bio.c
1291
if (!bio->bi_vcnt)
block/bio.c
1295
bio->bi_iter.bi_size += ret;
block/bio.c
1296
} while (iov_iter_count(iter) && !bio_full(bio, 0));
block/bio.c
1298
if (is_pci_p2pdma_page(bio->bi_io_vec->bv_page))
block/bio.c
1299
bio->bi_opf |= REQ_NOMERGE;
block/bio.c
1300
return bio_iov_iter_align_down(bio, iter, len_align_mask);
block/bio.c
1317
static void bio_free_folios(struct bio *bio)
block/bio.c
1322
bio_for_each_bvec_all(bv, bio, i) {
block/bio.c
1330
static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter)
block/bio.c
1334
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
block/bio.c
1336
if (WARN_ON_ONCE(bio->bi_iter.bi_size))
block/bio.c
1338
if (WARN_ON_ONCE(bio->bi_vcnt >= bio->bi_max_vecs))
block/bio.c
1348
if (bio->bi_iter.bi_size > BIO_MAX_SIZE - this_len)
block/bio.c
1354
bio_add_folio_nofail(bio, folio, this_len, 0);
block/bio.c
1358
bio_free_folios(bio);
block/bio.c
1363
} while (total_len && bio->bi_vcnt < bio->bi_max_vecs);
block/bio.c
1365
if (!bio->bi_iter.bi_size)
block/bio.c
1370
static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter)
block/bio.c
1382
ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec + 1, len,
block/bio.c
1383
&bio->bi_vcnt, bio->bi_max_vecs - 1, 0);
block/bio.c
1385
if (!bio->bi_vcnt) {
block/bio.c
1392
bio->bi_iter.bi_size += ret;
block/bio.c
1393
} while (len && bio->bi_vcnt < bio->bi_max_vecs - 1);
block/bio.c
1401
bvec_set_folio(&bio->bi_io_vec[0], folio, bio->bi_iter.bi_size, 0);
block/bio.c
1403
bio_set_flag(bio, BIO_PAGE_PINNED);
block/bio.c
1418
int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter)
block/bio.c
1420
if (op_is_write(bio_op(bio)))
block/bio.c
1421
return bio_iov_iter_bounce_write(bio, iter);
block/bio.c
1422
return bio_iov_iter_bounce_read(bio, iter);
block/bio.c
1436
static void bio_iov_iter_unbounce_read(struct bio *bio, bool is_error,
block/bio.c
1439
unsigned int len = bio->bi_io_vec[0].bv_len;
block/bio.c
1442
void *buf = bvec_virt(&bio->bi_io_vec[0]);
block/bio.c
1445
iov_iter_bvec(&to, ITER_DEST, bio->bi_io_vec + 1, bio->bi_vcnt,
block/bio.c
1454
if (bio_flagged(bio, BIO_PAGE_PINNED)) {
block/bio.c
1457
for (i = 0; i < bio->bi_vcnt; i++)
block/bio.c
1458
bvec_unpin(&bio->bi_io_vec[1 + i], mark_dirty);
block/bio.c
1461
folio_put(page_folio(bio->bi_io_vec[0].bv_page));
block/bio.c
1476
void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty)
block/bio.c
1478
if (op_is_write(bio_op(bio)))
block/bio.c
1479
bio_free_folios(bio);
block/bio.c
1481
bio_iov_iter_unbounce_read(bio, is_error, mark_dirty);
block/bio.c
1484
static void submit_bio_wait_endio(struct bio *bio)
block/bio.c
1486
complete(bio->bi_private);
block/bio.c
1500
int submit_bio_wait(struct bio *bio)
block/bio.c
1503
bio->bi_bdev->bd_disk->lockdep_map);
block/bio.c
1505
bio->bi_private = &done;
block/bio.c
1506
bio->bi_end_io = submit_bio_wait_endio;
block/bio.c
1507
bio->bi_opf |= REQ_SYNC;
block/bio.c
1508
submit_bio(bio);
block/bio.c
1511
return blk_status_to_errno(bio->bi_status);
block/bio.c
1530
struct bio bio;
block/bio.c
1536
bio_init(&bio, bdev, &bv, 1, op);
block/bio.c
1537
bio.bi_iter.bi_sector = sector;
block/bio.c
1538
bio_add_virt_nofail(&bio, data, len);
block/bio.c
1539
error = submit_bio_wait(&bio);
block/bio.c
1540
bio_uninit(&bio);
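
bio.c:1530-1540 above is the canonical synchronous pattern: a bio and a single bio_vec on the stack, set up with bio_init(), submitted with submit_bio_wait(), torn down with bio_uninit(). The same shape from a caller's side, reading one sector into a kernel buffer buf (hypothetical):

    #include <linux/bio.h>

    static int read_one_sector(struct block_device *bdev, sector_t sector,
                               void *buf)
    {
            struct bio_vec bv;
            struct bio bio;
            int error;

            bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
            bio.bi_iter.bi_sector = sector;
            bio_add_virt_nofail(&bio, buf, SECTOR_SIZE);

            error = submit_bio_wait(&bio);  /* sleeps until completion */
            bio_uninit(&bio);
            return error;
    }
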
block/bio.c
1545
static void bio_wait_end_io(struct bio *bio)
block/bio.c
1547
complete(bio->bi_private);
block/bio.c
1548
bio_put(bio);
block/bio.c
1554
void bio_await_chain(struct bio *bio)
block/bio.c
1557
bio->bi_bdev->bd_disk->lockdep_map);
block/bio.c
1559
bio->bi_private = &done;
block/bio.c
1560
bio->bi_end_io = bio_wait_end_io;
block/bio.c
1561
bio_endio(bio);
block/bio.c
1565
void __bio_advance(struct bio *bio, unsigned bytes)
block/bio.c
1567
if (bio_integrity(bio))
block/bio.c
1568
bio_integrity_advance(bio, bytes);
block/bio.c
1570
bio_crypt_advance(bio, bytes);
block/bio.c
1571
bio_advance_iter(bio, &bio->bi_iter, bytes);
block/bio.c
1575
void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
block/bio.c
1576
struct bio *src, struct bvec_iter *src_iter)
block/bio.c
1604
void bio_copy_data(struct bio *dst, struct bio *src)
block/bio.c
1613
void bio_free_pages(struct bio *bio)
block/bio.c
1618
bio_for_each_segment_all(bvec, bio, iter_all)
block/bio.c
1646
void bio_set_pages_dirty(struct bio *bio)
block/bio.c
1650
bio_for_each_folio_all(fi, bio) {
block/bio.c
1673
static struct bio *bio_dirty_list;
block/bio.c
1680
struct bio *bio, *next;
block/bio.c
1687
while ((bio = next) != NULL) {
block/bio.c
1688
next = bio->bi_private;
block/bio.c
1690
bio_release_pages(bio, true);
block/bio.c
1691
bio_put(bio);
block/bio.c
1695
void bio_check_pages_dirty(struct bio *bio)
block/bio.c
1700
bio_for_each_folio_all(fi, bio) {
block/bio.c
1705
bio_release_pages(bio, false);
block/bio.c
1706
bio_put(bio);
block/bio.c
1710
bio->bi_private = bio_dirty_list;
block/bio.c
1711
bio_dirty_list = bio;
block/bio.c
1717
static inline bool bio_remaining_done(struct bio *bio)
block/bio.c
1723
if (!bio_flagged(bio, BIO_CHAIN))
block/bio.c
1726
BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
block/bio.c
1728
if (atomic_dec_and_test(&bio->__bi_remaining)) {
block/bio.c
1729
bio_clear_flag(bio, BIO_CHAIN);
block/bio.c
1749
void bio_endio(struct bio *bio)
block/bio.c
1752
if (!bio_remaining_done(bio))
block/bio.c
1754
if (!bio_integrity_endio(bio))
block/bio.c
1757
blk_zone_bio_endio(bio);
block/bio.c
1759
rq_qos_done_bio(bio);
block/bio.c
1761
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
block/bio.c
1762
trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
block/bio.c
1763
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
block/bio.c
1774
if (bio->bi_end_io == bio_chain_endio) {
block/bio.c
1775
bio = __bio_chain_endio(bio);
block/bio.c
1785
if (bio->bi_blkg) {
block/bio.c
1786
blkg_put(bio->bi_blkg);
block/bio.c
1787
bio->bi_blkg = NULL;
block/bio.c
1791
if (bio->bi_end_io)
block/bio.c
1792
bio->bi_end_io(bio);
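
bio_endio() runs the teardown above (chain accounting, integrity, zone, cgroup, tracing) and then calls ->bi_end_io at most once per bio. The callback owns the bio at that point: it reads bi_status and drops the reference. A sketch with a hypothetical struct my_io carried in bi_private:

    #include <linux/bio.h>
    #include <linux/completion.h>

    struct my_io {                          /* hypothetical caller state */
            struct completion done;
            blk_status_t status;
    };

    static void my_end_io(struct bio *bio)
    {
            struct my_io *io = bio->bi_private;

            io->status = bio->bi_status;    /* BLK_STS_OK on success */
            complete(&io->done);
            bio_put(bio);                   /* release our reference */
    }
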
block/bio.c
1810
struct bio *bio_split(struct bio *bio, int sectors,
block/bio.c
1813
struct bio *split;
block/bio.c
1817
if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
block/bio.c
1821
if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
block/bio.c
1825
if (bio->bi_opf & REQ_ATOMIC)
block/bio.c
1828
split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
block/bio.c
1837
bio_advance(bio, split->bi_iter.bi_size);
block/bio.c
1839
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
block/bio.c
1855
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
block/bio.c
1858
if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
block/bio.c
1862
offset + size > bio_sectors(bio)))
block/bio.c
1866
if (offset == 0 && size == bio->bi_iter.bi_size)
block/bio.c
1869
bio_advance(bio, offset << 9);
block/bio.c
1870
bio->bi_iter.bi_size = size;
block/bio.c
1872
if (bio_integrity(bio))
block/bio.c
1873
bio_integrity_trim(bio);
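
bio_split() (bio.c:1810) clones the first `sectors` of a bio and advances the original past them, refusing REQ_OP_ZONE_APPEND and REQ_ATOMIC bios; bio_trim() is the in-place variant. The split must stay tied to the original so the parent completes only when both halves do, which is the md/raid-style chain-and-requeue idiom. A sketch, assuming this tree's ERR_PTR-returning bio_split() and a caller-owned bio_set:

    #include <linux/bio.h>

    static struct bio *split_and_requeue(struct bio *bio, int sectors,
                                         struct bio_set *split_set)
    {
            struct bio *split;

            split = bio_split(bio, sectors, GFP_NOIO, split_set);
            if (IS_ERR(split))
                    return split;

            bio_chain(split, bio);          /* parent waits for the split */
            submit_bio_noacct(bio);         /* requeue the remainder */
            return split;                   /* caller handles this part now */
    }
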
block/bio.c
1981
BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
block/bio.c
213
void bio_uninit(struct bio *bio)
block/bio.c
216
if (bio->bi_blkg) {
block/bio.c
217
blkg_put(bio->bi_blkg);
block/bio.c
218
bio->bi_blkg = NULL;
block/bio.c
221
if (bio_integrity(bio))
block/bio.c
222
bio_integrity_free(bio);
block/bio.c
224
bio_crypt_free_ctx(bio);
block/bio.c
228
static void bio_free(struct bio *bio)
block/bio.c
230
struct bio_set *bs = bio->bi_pool;
block/bio.c
231
void *p = bio;
block/bio.c
235
bio_uninit(bio);
block/bio.c
236
bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
block/bio.c
245
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
block/bio.c
248
bio->bi_next = NULL;
block/bio.c
249
bio->bi_bdev = bdev;
block/bio.c
250
bio->bi_opf = opf;
block/bio.c
251
bio->bi_flags = 0;
block/bio.c
252
bio->bi_ioprio = 0;
block/bio.c
253
bio->bi_write_hint = 0;
block/bio.c
254
bio->bi_write_stream = 0;
block/bio.c
255
bio->bi_status = 0;
block/bio.c
256
bio->bi_bvec_gap_bit = 0;
block/bio.c
257
bio->bi_iter.bi_sector = 0;
block/bio.c
258
bio->bi_iter.bi_size = 0;
block/bio.c
259
bio->bi_iter.bi_idx = 0;
block/bio.c
260
bio->bi_iter.bi_bvec_done = 0;
block/bio.c
261
bio->bi_end_io = NULL;
block/bio.c
262
bio->bi_private = NULL;
block/bio.c
264
bio->bi_blkg = NULL;
block/bio.c
265
bio->issue_time_ns = 0;
block/bio.c
267
bio_associate_blkg(bio);
block/bio.c
269
bio->bi_iocost_cost = 0;
block/bio.c
273
bio->bi_crypt_context = NULL;
block/bio.c
276
bio->bi_integrity = NULL;
block/bio.c
278
bio->bi_vcnt = 0;
block/bio.c
280
atomic_set(&bio->__bi_remaining, 1);
block/bio.c
281
atomic_set(&bio->__bi_cnt, 1);
block/bio.c
282
bio->bi_cookie = BLK_QC_T_NONE;
block/bio.c
284
bio->bi_max_vecs = max_vecs;
block/bio.c
285
bio->bi_io_vec = table;
block/bio.c
286
bio->bi_pool = NULL;
block/bio.c
302
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
block/bio.c
304
struct bio_vec *bv = bio->bi_io_vec;
block/bio.c
306
bio_uninit(bio);
block/bio.c
307
memset(bio, 0, BIO_RESET_BYTES);
block/bio.c
308
atomic_set(&bio->__bi_remaining, 1);
block/bio.c
309
bio->bi_io_vec = bv;
block/bio.c
31
struct bio *free_list;
block/bio.c
310
bio->bi_bdev = bdev;
block/bio.c
311
if (bio->bi_bdev)
block/bio.c
312
bio_associate_blkg(bio);
block/bio.c
313
bio->bi_opf = opf;
block/bio.c
32
struct bio *free_list_irq;
block/bio.c
332
void bio_reuse(struct bio *bio, blk_opf_t opf)
block/bio.c
334
unsigned short vcnt = bio->bi_vcnt, i;
block/bio.c
335
bio_end_io_t *end_io = bio->bi_end_io;
block/bio.c
336
void *private = bio->bi_private;
block/bio.c
338
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
block/bio.c
339
WARN_ON_ONCE(bio_integrity(bio));
block/bio.c
340
WARN_ON_ONCE(bio_has_crypt_ctx(bio));
block/bio.c
342
bio_reset(bio, bio->bi_bdev, opf);
block/bio.c
344
bio->bi_iter.bi_size += bio->bi_io_vec[i].bv_len;
block/bio.c
345
bio->bi_vcnt = vcnt;
block/bio.c
346
bio->bi_private = private;
block/bio.c
347
bio->bi_end_io = end_io;
block/bio.c
351
static struct bio *__bio_chain_endio(struct bio *bio)
block/bio.c
353
struct bio *parent = bio->bi_private;
block/bio.c
355
if (bio->bi_status && !parent->bi_status)
block/bio.c
356
parent->bi_status = bio->bi_status;
block/bio.c
357
bio_put(bio);
block/bio.c
365
static void bio_chain_endio(struct bio *bio)
block/bio.c
381
void bio_chain(struct bio *bio, struct bio *parent)
block/bio.c
383
BUG_ON(bio->bi_private || bio->bi_end_io);
block/bio.c
385
bio->bi_private = parent;
block/bio.c
386
bio->bi_end_io = bio_chain_endio;
block/bio.c
400
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
block/bio.c
409
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
block/bio.c
412
return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
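
bio_chain_and_submit(prev, new) chains prev to new and submits prev (a NULL prev is simply returned as new), and blk_next_bio() wraps that around a fresh allocation. That lets a loop emit an arbitrarily long chain and wait exactly once, the shape the blkdev_issue_* helpers in blk-lib.c use. A condensed sketch for a zero-payload op:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int issue_chained(struct block_device *bdev, sector_t sector,
                             sector_t nr_sects, sector_t chunk_sects)
    {
            struct bio *bio = NULL;
            int ret;

            while (nr_sects) {
                    sector_t len = min_t(sector_t, nr_sects, chunk_sects);

                    bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES,
                                       GFP_KERNEL);
                    bio->bi_iter.bi_sector = sector;
                    bio->bi_iter.bi_size = len << SECTOR_SHIFT;
                    sector += len;
                    nr_sects -= len;
            }

            ret = submit_bio_wait(bio);     /* waits for the whole chain */
            bio_put(bio);
            return ret;
    }
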
block/bio.c
419
struct bio *bio;
block/bio.c
423
bio = bio_list_pop(&bs->rescue_list);
block/bio.c
426
if (!bio)
block/bio.c
429
submit_bio_noacct(bio);
block/bio.c
436
struct bio *bio;
block/bio.c
454
while ((bio = bio_list_pop(&current->bio_list[0])))
block/bio.c
455
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
block/bio.c
459
while ((bio = bio_list_pop(&current->bio_list[1])))
block/bio.c
460
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
block/bio.c
486
static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
block/bio.c
491
struct bio *bio;
block/bio.c
502
bio = cache->free_list;
block/bio.c
503
cache->free_list = bio->bi_next;
block/bio.c
508
bio_init_inline(bio, bdev, nr_vecs, opf);
block/bio.c
510
bio_init(bio, bdev, NULL, nr_vecs, opf);
block/bio.c
511
bio->bi_pool = bs;
block/bio.c
512
return bio;
block/bio.c
549
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
block/bio.c
554
struct bio *bio;
block/bio.c
563
bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
block/bio.c
565
if (bio)
block/bio.c
566
return bio;
block/bio.c
609
bio = p + bs->front_pad;
block/bio.c
622
bio_init(bio, bdev, bvl, nr_vecs, opf);
block/bio.c
624
bio_init_inline(bio, bdev, BIO_INLINE_VECS, opf);
block/bio.c
626
bio_init(bio, bdev, NULL, 0, opf);
block/bio.c
629
bio->bi_pool = bs;
block/bio.c
630
return bio;
block/bio.c
654
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
block/bio.c
656
struct bio *bio;
block/bio.c
660
return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec),
block/bio.c
665
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
block/bio.c
670
__bio_for_each_segment(bv, bio, iter, start)
block/bio.c
685
static void bio_truncate(struct bio *bio, unsigned new_size)
block/bio.c
692
if (new_size >= bio->bi_iter.bi_size)
block/bio.c
695
if (bio_op(bio) != REQ_OP_READ)
block/bio.c
698
bio_for_each_segment(bv, bio, iter) {
block/bio.c
722
bio->bi_iter.bi_size = new_size;
block/bio.c
737
void guard_bio_eod(struct bio *bio)
block/bio.c
739
sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
block/bio.c
749
if (unlikely(bio->bi_iter.bi_sector >= maxsector))
block/bio.c
752
maxsector -= bio->bi_iter.bi_sector;
block/bio.c
753
if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
block/bio.c
756
bio_truncate(bio, maxsector << 9);
block/bio.c
763
struct bio *bio;
block/bio.c
765
while ((bio = cache->free_list) != NULL) {
block/bio.c
766
cache->free_list = bio->bi_next;
block/bio.c
768
bio_free(bio);
block/bio.c
816
static inline void bio_put_percpu_cache(struct bio *bio)
block/bio.c
820
cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
block/bio.c
825
bio_uninit(bio);
block/bio.c
826
bio->bi_next = cache->free_list;
block/bio.c
828
bio->bi_bdev = NULL;
block/bio.c
829
cache->free_list = bio;
block/bio.c
834
bio_uninit(bio);
block/bio.c
835
bio->bi_next = cache->free_list_irq;
block/bio.c
836
cache->free_list_irq = bio;
block/bio.c
845
bio_free(bio);
block/bio.c
856
void bio_put(struct bio *bio)
block/bio.c
858
if (unlikely(bio_flagged(bio, BIO_REFFED))) {
block/bio.c
859
BUG_ON(!atomic_read(&bio->__bi_cnt));
block/bio.c
860
if (!atomic_dec_and_test(&bio->__bi_cnt))
block/bio.c
863
if (bio->bi_opf & REQ_ALLOC_CACHE)
block/bio.c
864
bio_put_percpu_cache(bio);
block/bio.c
866
bio_free(bio);
block/bio.c
870
static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
block/bio.c
872
bio_set_flag(bio, BIO_CLONED);
block/bio.c
873
bio->bi_ioprio = bio_src->bi_ioprio;
block/bio.c
874
bio->bi_write_hint = bio_src->bi_write_hint;
block/bio.c
875
bio->bi_write_stream = bio_src->bi_write_stream;
block/bio.c
876
bio->bi_iter = bio_src->bi_iter;
block/bio.c
878
if (bio->bi_bdev) {
block/bio.c
879
if (bio->bi_bdev == bio_src->bi_bdev &&
block/bio.c
881
bio_set_flag(bio, BIO_REMAPPED);
block/bio.c
882
bio_clone_blkg_association(bio, bio_src);
block/bio.c
885
if (bio_crypt_clone(bio, bio_src, gfp) < 0)
block/bio.c
888
bio_integrity_clone(bio, bio_src, gfp) < 0)
block/bio.c
905
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
block/bio.c
908
struct bio *bio;
block/bio.c
910
bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
block/bio.c
911
if (!bio)
block/bio.c
914
if (__bio_clone(bio, bio_src, gfp) < 0) {
block/bio.c
915
bio_put(bio);
block/bio.c
918
bio->bi_io_vec = bio_src->bi_io_vec;
block/bio.c
920
return bio;
block/bio.c
936
int bio_init_clone(struct block_device *bdev, struct bio *bio,
block/bio.c
937
struct bio *bio_src, gfp_t gfp)
block/bio.c
941
bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
block/bio.c
942
ret = __bio_clone(bio, bio_src, gfp);
block/bio.c
944
bio_uninit(bio);
block/bio.c
957
static inline bool bio_full(struct bio *bio, unsigned len)
block/bio.c
959
if (bio->bi_vcnt >= bio->bi_max_vecs)
block/bio.c
961
if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
block/blk-cgroup-fc-appid.c
51
char *blkcg_get_fc_appid(struct bio *bio)
block/blk-cgroup-fc-appid.c
53
if (!bio->bi_blkg || bio->bi_blkg->blkcg->fc_app_id[0] == '\0')
block/blk-cgroup-fc-appid.c
55
return bio->bi_blkg->blkcg->fc_app_id;
block/blk-cgroup.c
209
struct bio *bio;
block/blk-cgroup.c
2090
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
block/blk-cgroup.c
2096
blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
block/blk-cgroup.c
2123
void bio_associate_blkg_from_css(struct bio *bio,
block/blk-cgroup.c
2126
if (bio->bi_blkg)
block/blk-cgroup.c
2127
blkg_put(bio->bi_blkg);
block/blk-cgroup.c
2130
bio->bi_blkg = blkg_tryget_closest(bio, css);
block/blk-cgroup.c
2132
blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
block/blk-cgroup.c
2133
bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
block/blk-cgroup.c
2147
void bio_associate_blkg(struct bio *bio)
block/blk-cgroup.c
2151
if (blk_op_is_passthrough(bio->bi_opf))
block/blk-cgroup.c
2156
if (bio->bi_blkg)
block/blk-cgroup.c
2157
css = bio_blkcg_css(bio);
block/blk-cgroup.c
2161
bio_associate_blkg_from_css(bio, css);
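
bio_associate_blkg() runs from bio_init()/bio_reset() and charges the bio to the submitting task's cgroup; bio_associate_blkg_from_css() overrides that for I/O issued on someone else's behalf (writeback being the main user). A sketch that makes a helper-thread bio inherit another bio's cgroup:

    #include <linux/bio.h>
    #include <linux/blk-cgroup.h>

    static void inherit_cgroup(struct bio *bio, struct bio *src)
    {
            struct cgroup_subsys_state *css = bio_blkcg_css(src);

            if (css)
                    bio_associate_blkg_from_css(bio, css);
    }
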
block/blk-cgroup.c
2172
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
block/blk-cgroup.c
2179
static int blk_cgroup_io_type(struct bio *bio)
block/blk-cgroup.c
2181
if (op_is_discard(bio->bi_opf))
block/blk-cgroup.c
2183
if (op_is_write(bio->bi_opf))
block/blk-cgroup.c
2188
void blk_cgroup_bio_start(struct bio *bio)
block/blk-cgroup.c
2190
struct blkcg *blkcg = bio->bi_blkg->blkcg;
block/blk-cgroup.c
2191
int rwd = blk_cgroup_io_type(bio), cpu;
block/blk-cgroup.c
2203
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
block/blk-cgroup.c
2210
if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
block/blk-cgroup.c
2211
bio_set_flag(bio, BIO_CGROUP_ACCT);
block/blk-cgroup.c
2212
bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
block/blk-cgroup.c
223
while ((bio = bio_list_pop(&bios)))
block/blk-cgroup.c
224
submit_bio(bio);
block/blk-cgroup.c
235
void blkcg_punt_bio_submit(struct bio *bio)
block/blk-cgroup.c
237
struct blkcg_gq *blkg = bio->bi_blkg;
block/blk-cgroup.c
241
bio_list_add(&blkg->async_bios, bio);
block/blk-cgroup.c
246
submit_bio(bio);
block/blk-cgroup.c
271
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
block/blk-cgroup.c
273
if (!bio || !bio->bi_blkg)
block/blk-cgroup.c
275
return &bio->bi_blkg->blkcg->css;
block/blk-cgroup.h
241
static inline bool bio_issue_as_root_blkg(struct bio *bio)
block/blk-cgroup.h
243
return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
block/blk-cgroup.h
451
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
block/blk-cgroup.h
453
return rq->bio->bi_blkg == bio->bi_blkg &&
block/blk-cgroup.h
454
bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
block/blk-cgroup.h
463
void blk_cgroup_bio_start(struct bio *bio);
block/blk-cgroup.h
495
static inline void blk_cgroup_bio_start(struct bio *bio) { }
block/blk-cgroup.h
496
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
block/blk-core.c
1015
bio = READ_ONCE(kiocb->private);
block/blk-core.c
1016
if (bio)
block/blk-core.c
1017
ret = bio_poll(bio, iob, flags);
block/blk-core.c
1058
unsigned long bio_start_io_acct(struct bio *bio)
block/blk-core.c
1060
return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
block/blk-core.c
1081
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
block/blk-core.c
1084
bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
block/blk-core.c
1281
sizeof_field(struct bio, bi_opf));
block/blk-core.c
333
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
block/blk-core.c
336
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-core.c
338
if (bio->bi_opf & REQ_NOWAIT) {
block/blk-core.c
341
bio_wouldblock_error(bio);
block/blk-core.c
365
bio_io_error(bio);
block/blk-core.c
523
static inline void bio_check_ro(struct bio *bio)
block/blk-core.c
525
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
block/blk-core.c
526
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
block/blk-core.c
529
if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
block/blk-core.c
532
bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);
block/blk-core.c
539
bio->bi_bdev);
block/blk-core.c
543
int should_fail_bio(struct bio *bio)
block/blk-core.c
545
if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
block/blk-core.c
556
static inline int bio_check_eod(struct bio *bio)
block/blk-core.c
558
sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
block/blk-core.c
559
unsigned int nr_sectors = bio_sectors(bio);
block/blk-core.c
563
bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
block/blk-core.c
568
current->comm, bio->bi_bdev, bio->bi_opf,
block/blk-core.c
569
bio->bi_iter.bi_sector, nr_sectors, maxsector);
block/blk-core.c
578
static int blk_partition_remap(struct bio *bio)
block/blk-core.c
580
struct block_device *p = bio->bi_bdev;
block/blk-core.c
582
if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
block/blk-core.c
584
if (bio_sectors(bio)) {
block/blk-core.c
585
bio->bi_iter.bi_sector += p->bd_start_sect;
block/blk-core.c
586
trace_block_bio_remap(bio, p->bd_dev,
block/blk-core.c
587
bio->bi_iter.bi_sector -
block/blk-core.c
590
bio_set_flag(bio, BIO_REMAPPED);
block/blk-core.c
598
struct bio *bio)
block/blk-core.c
600
int nr_sectors = bio_sectors(bio);
block/blk-core.c
603
if (!bdev_is_zoned(bio->bi_bdev))
block/blk-core.c
607
if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
block/blk-core.c
622
bio->bi_opf |= REQ_NOMERGE;
block/blk-core.c
627
static void __submit_bio(struct bio *bio)
block/blk-core.c
634
if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
block/blk-core.c
635
blk_mq_submit_bio(bio);
block/blk-core.c
636
} else if (likely(bio_queue_enter(bio) == 0)) {
block/blk-core.c
637
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-core.c
639
if ((bio->bi_opf & REQ_POLLED) &&
block/blk-core.c
641
bio->bi_status = BLK_STS_NOTSUPP;
block/blk-core.c
642
bio_endio(bio);
block/blk-core.c
644
disk->fops->submit_bio(bio);
block/blk-core.c
671
static void __submit_bio_noacct(struct bio *bio)
block/blk-core.c
675
BUG_ON(bio->bi_next);
block/blk-core.c
681
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk-core.c
690
__submit_bio(bio);
block/blk-core.c
698
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
block/blk-core.c
699
if (q == bdev_get_queue(bio->bi_bdev))
block/blk-core.c
700
bio_list_add(&same, bio);
block/blk-core.c
702
bio_list_add(&lower, bio);
block/blk-core.c
710
} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
block/blk-core.c
715
static void __submit_bio_noacct_mq(struct bio *bio)
block/blk-core.c
722
__submit_bio(bio);
block/blk-core.c
723
} while ((bio = bio_list_pop(&bio_list[0])));
block/blk-core.c
728
void submit_bio_noacct_nocheck(struct bio *bio, bool split)
block/blk-core.c
730
blk_cgroup_bio_start(bio);
block/blk-core.c
732
if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
block/blk-core.c
733
trace_block_bio_queue(bio);
block/blk-core.c
738
bio_set_flag(bio, BIO_TRACE_COMPLETION);
block/blk-core.c
749
bio_list_add_head(&current->bio_list[0], bio);
block/blk-core.c
751
bio_list_add(&current->bio_list[0], bio);
block/blk-core.c
752
} else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
block/blk-core.c
753
__submit_bio_noacct_mq(bio);
block/blk-core.c
755
__submit_bio_noacct(bio);
block/blk-core.c
760
struct bio *bio)
block/blk-core.c
762
if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
block/blk-core.c
765
if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
block/blk-core.c
780
void submit_bio_noacct(struct bio *bio)
block/blk-core.c
782
struct block_device *bdev = bio->bi_bdev;
block/blk-core.c
792
if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
block/blk-core.c
795
if (bio_has_crypt_ctx(bio)) {
block/blk-core.c
796
if (WARN_ON_ONCE(!bio_has_data(bio)))
block/blk-core.c
798
if (!blk_crypto_supported(bio))
block/blk-core.c
802
if (should_fail_bio(bio))
block/blk-core.c
804
bio_check_ro(bio);
block/blk-core.c
805
if (!bio_flagged(bio, BIO_REMAPPED)) {
block/blk-core.c
806
if (unlikely(bio_check_eod(bio)))
block/blk-core.c
809
unlikely(blk_partition_remap(bio)))
block/blk-core.c
817
if (op_is_flush(bio->bi_opf)) {
block/blk-core.c
818
if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
block/blk-core.c
819
bio_op(bio) != REQ_OP_ZONE_APPEND))
block/blk-core.c
822
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
block/blk-core.c
823
if (!bio_sectors(bio)) {
block/blk-core.c
830
switch (bio_op(bio)) {
block/blk-core.c
834
if (bio->bi_opf & REQ_ATOMIC) {
block/blk-core.c
835
status = blk_validate_atomic_write_op_size(q, bio);
block/blk-core.c
855
status = blk_check_zone_append(q, bio);
block/blk-core.c
868
if (!bdev_is_zoned(bio->bi_bdev))
block/blk-core.c
882
if (blk_throtl_bio(bio))
block/blk-core.c
884
submit_bio_noacct_nocheck(bio, false);
block/blk-core.c
890
bio->bi_status = status;
block/blk-core.c
891
bio_endio(bio);
block/blk-core.c
895
static void bio_set_ioprio(struct bio *bio)
block/blk-core.c
898
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
block/blk-core.c
899
bio->bi_ioprio = get_current_ioprio();
block/blk-core.c
900
blkcg_set_ioprio(bio);
block/blk-core.c
916
void submit_bio(struct bio *bio)
block/blk-core.c
918
if (bio_op(bio) == REQ_OP_READ) {
block/blk-core.c
919
task_io_account_read(bio->bi_iter.bi_size);
block/blk-core.c
920
count_vm_events(PGPGIN, bio_sectors(bio));
block/blk-core.c
921
} else if (bio_op(bio) == REQ_OP_WRITE) {
block/blk-core.c
922
count_vm_events(PGPGOUT, bio_sectors(bio));
block/blk-core.c
925
bio_set_ioprio(bio);
block/blk-core.c
926
submit_bio_noacct(bio);
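
submit_bio() returns void; every failure, including a REQ_NOWAIT submission that would block (bio_wouldblock_error() at blk-core.c:341 above), is reported through ->bi_end_io with bi_status set, BLK_STS_AGAIN in the nowait case. A nonblocking submitter therefore retries from its completion path; a sketch with hypothetical retry/complete helpers:

    #include <linux/bio.h>

    static void requeue_blocking_retry(struct bio *bio);  /* hypothetical */
    static void complete_my_io(struct bio *bio);          /* hypothetical */

    static void nowait_end_io(struct bio *bio)
    {
            if (bio->bi_status == BLK_STS_AGAIN) {
                    /* Entering the queue would have blocked: retry later. */
                    requeue_blocking_retry(bio);
                    return;
            }
            complete_my_io(bio);
            bio_put(bio);
    }

    static void submit_nowait(struct bio *bio)
    {
            bio->bi_opf |= REQ_NOWAIT;
            bio->bi_end_io = nowait_end_io;
            submit_bio(bio);
    }
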
block/blk-core.c
942
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
block/blk-core.c
944
blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
block/blk-core.c
949
bdev = READ_ONCE(bio->bi_bdev);
block/blk-core.c
977
ret = disk->fops->poll_bio(bio, iob, flags);
block/blk-core.c
991
struct bio *bio;
block/blk-crypto-fallback.c
144
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
block/blk-crypto-fallback.c
146
struct bio *src_bio = enc_bio->bi_private;
block/blk-crypto-fallback.c
172
static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
block/blk-crypto-fallback.c
178
struct bio *bio;
block/blk-crypto-fallback.c
180
bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
block/blk-crypto-fallback.c
183
bio_set_flag(bio, BIO_REMAPPED);
block/blk-crypto-fallback.c
184
bio->bi_private = bio_src;
block/blk-crypto-fallback.c
185
bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
block/blk-crypto-fallback.c
186
bio->bi_ioprio = bio_src->bi_ioprio;
block/blk-crypto-fallback.c
187
bio->bi_write_hint = bio_src->bi_write_hint;
block/blk-crypto-fallback.c
188
bio->bi_write_stream = bio_src->bi_write_stream;
block/blk-crypto-fallback.c
189
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
block/blk-crypto-fallback.c
190
bio_clone_blkg_association(bio, bio_src);
block/blk-crypto-fallback.c
198
pages = (struct page **)bio->bi_io_vec;
block/blk-crypto-fallback.c
215
return bio;
block/blk-crypto-fallback.c
241
static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
block/blk-crypto-fallback.c
252
struct bio *enc_bio;
block/blk-crypto-fallback.c
355
static void blk_crypto_fallback_encrypt_bio(struct bio *src_bio)
block/blk-crypto-fallback.c
373
static blk_status_t __blk_crypto_fallback_decrypt_bio(struct bio *bio,
block/blk-crypto-fallback.c
395
__bio_for_each_segment(bv, bio, iter, iter) {
block/blk-crypto-fallback.c
425
struct bio *bio = f_ctx->bio;
block/blk-crypto-fallback.c
433
status = __blk_crypto_fallback_decrypt_bio(bio, bc,
block/blk-crypto-fallback.c
440
bio->bi_status = status;
block/blk-crypto-fallback.c
441
bio_endio(bio);
block/blk-crypto-fallback.c
452
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
block/blk-crypto-fallback.c
454
struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;
block/blk-crypto-fallback.c
456
bio->bi_private = f_ctx->bi_private_orig;
block/blk-crypto-fallback.c
457
bio->bi_end_io = f_ctx->bi_end_io_orig;
block/blk-crypto-fallback.c
460
if (bio->bi_status) {
block/blk-crypto-fallback.c
462
bio_endio(bio);
block/blk-crypto-fallback.c
467
f_ctx->bio = bio;
block/blk-crypto-fallback.c
489
bool blk_crypto_fallback_bio_prep(struct bio *bio)
block/blk-crypto-fallback.c
491
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
block/blk-crypto-fallback.c
496
bio_io_error(bio);
block/blk-crypto-fallback.c
502
bio->bi_status = BLK_STS_NOTSUPP;
block/blk-crypto-fallback.c
503
bio_endio(bio);
block/blk-crypto-fallback.c
507
if (bio_data_dir(bio) == WRITE) {
block/blk-crypto-fallback.c
508
blk_crypto_fallback_encrypt_bio(bio);
block/blk-crypto-fallback.c
518
f_ctx->crypt_iter = bio->bi_iter;
block/blk-crypto-fallback.c
519
f_ctx->bi_private_orig = bio->bi_private;
block/blk-crypto-fallback.c
52
struct bio *bio;
block/blk-crypto-fallback.c
520
f_ctx->bi_end_io_orig = bio->bi_end_io;
block/blk-crypto-fallback.c
521
bio->bi_private = (void *)f_ctx;
block/blk-crypto-fallback.c
522
bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
block/blk-crypto-fallback.c
523
bio_crypt_free_ctx(bio);
block/blk-crypto-internal.h
107
struct bio *bio)
block/blk-crypto-internal.h
113
struct bio *bio)
block/blk-crypto-internal.h
119
struct bio *bio)
block/blk-crypto-internal.h
148
static inline bool blk_crypto_supported(struct bio *bio)
block/blk-crypto-internal.h
155
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
block/blk-crypto-internal.h
156
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
block/blk-crypto-internal.h
158
if (bio_has_crypt_ctx(bio))
block/blk-crypto-internal.h
159
__bio_crypt_advance(bio, bytes);
block/blk-crypto-internal.h
162
void __bio_crypt_free_ctx(struct bio *bio);
block/blk-crypto-internal.h
163
static inline void bio_crypt_free_ctx(struct bio *bio)
block/blk-crypto-internal.h
165
if (bio_has_crypt_ctx(bio))
block/blk-crypto-internal.h
166
__bio_crypt_free_ctx(bio);
block/blk-crypto-internal.h
170
struct bio *bio)
block/blk-crypto-internal.h
173
if (bio_has_crypt_ctx(bio))
block/blk-crypto-internal.h
174
memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
block/blk-crypto-internal.h
201
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
block/blk-crypto-internal.h
213
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
block/blk-crypto-internal.h
216
if (bio_has_crypt_ctx(bio))
block/blk-crypto-internal.h
217
return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
block/blk-crypto-internal.h
221
bool blk_crypto_fallback_bio_prep(struct bio *bio);
block/blk-crypto-internal.h
32
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
block/blk-crypto-internal.h
38
struct bio *bio)
block/blk-crypto-internal.h
41
bio->bi_crypt_context);
block/blk-crypto-internal.h
45
struct bio *bio)
block/blk-crypto-internal.h
47
return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
block/blk-crypto-internal.h
48
bio->bi_iter.bi_size, req->crypt_ctx);
block/blk-crypto-internal.h
89
static inline bool blk_crypto_supported(struct bio *bio)
block/blk-crypto-internal.h
91
return blk_crypto_config_supported_natively(bio->bi_bdev,
block/blk-crypto-internal.h
92
&bio->bi_crypt_context->bc_key->crypto_cfg);
block/blk-crypto.c
101
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
block/blk-crypto.c
117
bio->bi_crypt_context = bc;
block/blk-crypto.c
120
void __bio_crypt_free_ctx(struct bio *bio)
block/blk-crypto.c
122
mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
block/blk-crypto.c
123
bio->bi_crypt_context = NULL;
block/blk-crypto.c
126
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
block/blk-crypto.c
154
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
block/blk-crypto.c
156
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
block/blk-crypto.c
203
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
block/blk-crypto.c
205
return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
block/blk-crypto.c
251
bool __blk_crypto_submit_bio(struct bio *bio)
block/blk-crypto.c
253
const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
block/blk-crypto.c
254
struct block_device *bdev = bio->bi_bdev;
block/blk-crypto.c
257
if (WARN_ON_ONCE(!bio_has_data(bio))) {
block/blk-crypto.c
258
bio_io_error(bio);
block/blk-crypto.c
270
bio->bi_status = BLK_STS_NOTSUPP;
block/blk-crypto.c
271
bio_endio(bio);
block/blk-crypto.c
274
return blk_crypto_fallback_bio_prep(bio);
block/blk-crypto.c
281
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
block/blk-crypto.c
289
*rq->crypt_ctx = *bio->bi_crypt_context;
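
The inline-encryption context travels with the bio: bio_crypt_set_ctx() (blk-crypto.c:101) attaches a key and starting DUN, __bio_crypt_advance() keeps the DUN in step across splits and advances, and __blk_crypto_submit_bio() either uses hardware support or hands off to the fallback. Attaching a context, roughly the shape fscrypt uses; the DUN choice here is illustrative:

    #include <linux/bio.h>
    #include <linux/blk-crypto.h>

    static void set_crypt_ctx(struct bio *bio,
                              const struct blk_crypto_key *key, u64 first_lblk)
    {
            /* Element 0 carries the 64-bit DUN; the rest stay zero. */
            u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_lblk };

            bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
    }
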
block/blk-flush.c
115
rq->bio = rq->biotail;
block/blk-flush.c
116
if (rq->bio)
block/blk-flush.c
117
rq->__sector = rq->bio->bi_iter.bi_sector;
block/blk-flush.c
392
WARN_ON_ONCE(rq->bio != rq->biotail);
block/blk-flush.c
472
struct bio bio;
block/blk-flush.c
474
bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
block/blk-flush.c
475
return submit_bio_wait(&bio);
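
blk-flush.c:472-475 above is the whole of blkdev_issue_flush(): a payload-free REQ_OP_WRITE | REQ_PREFLUSH bio on the stack, waited on synchronously. Callers only need:

    #include <linux/blkdev.h>

    static int flush_volatile_cache(struct block_device *bdev)
    {
            /* Empty flush: forces the device write cache to stable media. */
            return blkdev_issue_flush(bdev);
    }
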
block/blk-integrity.c
130
ret = bio_integrity_map_user(rq->bio, &iter);
block/blk-integrity.c
134
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
block/blk-integrity.c
151
bip = bio_integrity(req->bio);
block/blk-integrity.c
152
bip_next = bio_integrity(next->bio);
block/blk-integrity.c
164
if (integrity_req_gap_back_merge(req, next->bio))
block/blk-integrity.c
171
struct bio *bio)
block/blk-integrity.c
173
struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
block/blk-integrity.c
182
bip = bio_integrity(req->bio);
block/blk-integrity.c
190
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
block/blk-integrity.c
28
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
block/blk-integrity.c
36
bio_for_each_integrity_vec(iv, bio, iter) {
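
blk_rq_count_integrity_sg() walks the protection-information payload with the same iterator shape used for data segments. Any holder of the bio can do the same; a sketch that totals the attached PI bytes:

    #include <linux/bio.h>
    #include <linux/blk-integrity.h>

    static unsigned int pi_bytes(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec iv;
            unsigned int bytes = 0;

            if (!bio_integrity(bio))
                    return 0;
            bio_for_each_integrity_vec(iv, bio, iter)
                    bytes += iv.bv_len;
            return bytes;
    }
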
block/blk-iocost.c
1480
iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
block/blk-iocost.c
2528
static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
block/blk-iocost.c
2533
u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
block/blk-iocost.c
2538
if (!bio->bi_iter.bi_size)
block/blk-iocost.c
2541
switch (bio_op(bio)) {
block/blk-iocost.c
2557
seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
block/blk-iocost.c
2573
static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
block/blk-iocost.c
2577
calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
block/blk-iocost.c
2606
static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
block/blk-iocost.c
2608
struct blkcg_gq *blkg = bio->bi_blkg;
block/blk-iocost.c
2622
abs_cost = calc_vtime_cost(bio, iocg, false);
block/blk-iocost.c
2629
iocg->cursor = bio_end_sector(bio);
block/blk-iocost.c
2640
iocg_commit_bio(iocg, bio, abs_cost, cost);
block/blk-iocost.c
2651
use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
block/blk-iocost.c
2665
iocg_commit_bio(iocg, bio, abs_cost, cost);
block/blk-iocost.c
2690
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
block/blk-iocost.c
2720
wait.bio = bio;
block/blk-iocost.c
2741
struct bio *bio)
block/blk-iocost.c
2743
struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
block/blk-iocost.c
2745
sector_t bio_end = bio_end_sector(bio);
block/blk-iocost.c
2754
abs_cost = calc_vtime_cost(bio, iocg, true);
block/blk-iocost.c
2772
if (rq->bio && rq->bio->bi_iocost_cost &&
block/blk-iocost.c
2774
iocg_commit_bio(iocg, bio, abs_cost, cost);
block/blk-iocost.c
2790
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
block/blk-iocost.c
2792
iocg_commit_bio(iocg, bio, abs_cost, cost);
block/blk-iocost.c
2799
static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
block/blk-iocost.c
2801
struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
block/blk-iocost.c
2803
if (iocg && bio->bi_iocost_cost)
block/blk-iocost.c
2804
atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
block/blk-iocost.c
567
struct bio *bio;
block/blk-iocost.c
717
static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
block/blk-iocost.c
722
bio->bi_iocost_cost = cost;
block/blk-iolatency.c
463
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
block/blk-iolatency.c
466
struct blkcg_gq *blkg = bio->bi_blkg;
block/blk-iolatency.c
467
bool issue_as_root = bio_issue_as_root_blkg(bio);
block/blk-iolatency.c
481
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
block/blk-iolatency.c
583
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
block/blk-iolatency.c
590
bool issue_as_root = bio_issue_as_root_blkg(bio);
block/blk-iolatency.c
593
blkg = bio->bi_blkg;
block/blk-iolatency.c
594
if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED))
block/blk-iolatency.c
597
iolat = blkg_to_lat(bio->bi_blkg);
block/blk-iolatency.c
619
if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
block/blk-iolatency.c
620
iolatency_record_time(iolat, bio->issue_time_ns, now,
block/blk-ioprio.c
133
void blkcg_set_ioprio(struct bio *bio)
block/blk-ioprio.c
135
struct ioprio_blkcg *blkcg = blkcg_to_ioprio_blkcg(bio->bi_blkg->blkcg);
block/blk-ioprio.c
150
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) != IOPRIO_CLASS_RT)
block/blk-ioprio.c
151
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
block/blk-ioprio.c
162
prio = max_t(u16, bio->bi_ioprio,
block/blk-ioprio.c
164
if (prio > bio->bi_ioprio)
block/blk-ioprio.c
165
bio->bi_ioprio = prio;
block/blk-ioprio.h
12
void blkcg_set_ioprio(struct bio *bio);
block/blk-ioprio.h
14
static inline void blkcg_set_ioprio(struct bio *bio)
block/blk-ioprio.h
9
struct bio;
block/blk-lib.c
121
struct bio **biop, unsigned flags, sector_t limit)
block/blk-lib.c
126
struct bio *bio;
block/blk-lib.c
132
bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
block/blk-lib.c
133
bio->bi_iter.bi_sector = sector;
block/blk-lib.c
135
bio->bi_opf |= REQ_NOUNMAP;
block/blk-lib.c
137
bio->bi_iter.bi_size = len << SECTOR_SHIFT;
block/blk-lib.c
138
*biop = bio_chain_and_submit(*biop, bio);
block/blk-lib.c
150
struct bio *bio = NULL;
block/blk-lib.c
155
__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
block/blk-lib.c
157
if (bio) {
block/blk-lib.c
160
bio_await_chain(bio);
block/blk-lib.c
164
ret = submit_bio_wait(bio);
block/blk-lib.c
165
bio_put(bio);
block/blk-lib.c
195
struct bio **biop, unsigned int flags)
block/blk-lib.c
201
struct bio *bio;
block/blk-lib.c
207
bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
block/blk-lib.c
208
bio->bi_iter.bi_sector = sector;
block/blk-lib.c
215
if (!bio_add_folio(bio, zero_folio, len, 0))
block/blk-lib.c
221
*biop = bio_chain_and_submit(*biop, bio);
block/blk-lib.c
229
struct bio *bio = NULL;
block/blk-lib.c
237
__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
block/blk-lib.c
238
if (bio) {
block/blk-lib.c
241
bio_await_chain(bio);
block/blk-lib.c
245
ret = submit_bio_wait(bio);
block/blk-lib.c
246
bio_put(bio);
block/blk-lib.c
273
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
block/blk-lib.c
333
struct bio *bio = NULL;
block/blk-lib.c
353
bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
block/blk-lib.c
354
bio->bi_iter.bi_sector = sector;
block/blk-lib.c
355
bio->bi_iter.bi_size = len << SECTOR_SHIFT;
block/blk-lib.c
361
if (bio) {
block/blk-lib.c
362
ret = submit_bio_wait(bio);
block/blk-lib.c
363
bio_put(bio);
block/blk-lib.c
38
struct bio *blk_alloc_discard_bio(struct block_device *bdev,
block/blk-lib.c
42
struct bio *bio;
block/blk-lib.c
47
bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
block/blk-lib.c
48
if (!bio)
block/blk-lib.c
50
bio->bi_iter.bi_sector = *sector;
block/blk-lib.c
51
bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
block/blk-lib.c
60
return bio;
block/blk-lib.c
64
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
block/blk-lib.c
66
struct bio *bio;
block/blk-lib.c
68
while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
block/blk-lib.c
70
*biop = bio_chain_and_submit(*biop, bio);
block/blk-lib.c
87
struct bio *bio = NULL;
block/blk-lib.c
92
__blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
block/blk-lib.c
93
if (bio) {
block/blk-lib.c
94
ret = submit_bio_wait(bio);
block/blk-lib.c
97
bio_put(bio);
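
The discard path above allocates zero-payload REQ_OP_DISCARD bios with blk_alloc_discard_bio(), chains them via bio_chain_and_submit(), and waits once. The synchronous wrapper is the usual entry point:

    #include <linux/blkdev.h>

    static int discard_range(struct block_device *bdev, sector_t sector,
                             sector_t nr_sects)
    {
            if (!bdev_max_discard_sectors(bdev))
                    return -EOPNOTSUPP;     /* device cannot discard */
            return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
    }
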
block/blk-map.c
103
bio_for_each_segment_all(bvec, bio, iter_all) {
block/blk-map.c
128
static int bio_uncopy_user(struct bio *bio)
block/blk-map.c
130
struct bio_map_data *bmd = bio->bi_private;
block/blk-map.c
141
else if (bio_data_dir(bio) == READ)
block/blk-map.c
142
ret = bio_copy_to_iter(bio, bmd->iter);
block/blk-map.c
144
bio_free_pages(bio);
block/blk-map.c
155
struct bio *bio;
block/blk-map.c
176
bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
block/blk-map.c
177
if (!bio)
block/blk-map.c
210
if (bio_add_page(bio, page, bytes, offset) < bytes) {
block/blk-map.c
221
map_data->offset += bio->bi_iter.bi_size;
block/blk-map.c
228
ret = bio_copy_from_iter(bio, iter);
block/blk-map.c
236
ret = bio_copy_from_iter(bio, &iter2);
block/blk-map.c
241
zero_fill_bio(bio);
block/blk-map.c
242
iov_iter_advance(iter, bio->bi_iter.bi_size);
block/blk-map.c
245
bio->bi_private = bmd;
block/blk-map.c
247
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
253
bio_free_pages(bio);
block/blk-map.c
254
blk_mq_map_bio_put(bio);
block/blk-map.c
264
struct bio *bio;
block/blk-map.c
270
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
block/blk-map.c
271
if (!bio)
block/blk-map.c
277
ret = bio_iov_iter_get_pages(bio, iter, 0);
block/blk-map.c
280
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
286
bio_release_pages(bio, false);
block/blk-map.c
288
blk_mq_map_bio_put(bio);
block/blk-map.c
292
static void bio_invalidate_vmalloc_pages(struct bio *bio)
block/blk-map.c
295
if (bio->bi_private && !op_is_write(bio_op(bio))) {
block/blk-map.c
298
for (i = 0; i < bio->bi_vcnt; i++)
block/blk-map.c
299
len += bio->bi_io_vec[i].bv_len;
block/blk-map.c
300
invalidate_kernel_vmap_range(bio->bi_private, len);
block/blk-map.c
305
static void bio_map_kern_endio(struct bio *bio)
block/blk-map.c
307
bio_invalidate_vmalloc_pages(bio);
block/blk-map.c
308
blk_mq_map_bio_put(bio);
block/blk-map.c
311
static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
block/blk-map.c
315
struct bio *bio;
block/blk-map.c
317
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
block/blk-map.c
318
if (!bio)
block/blk-map.c
322
bio->bi_private = data;
block/blk-map.c
323
if (!bio_add_vmalloc(bio, data, len)) {
block/blk-map.c
324
blk_mq_map_bio_put(bio);
block/blk-map.c
328
bio_add_virt_nofail(bio, data, len);
block/blk-map.c
330
bio->bi_end_io = bio_map_kern_endio;
block/blk-map.c
331
return bio;
block/blk-map.c
334
static void bio_copy_kern_endio(struct bio *bio)
block/blk-map.c
336
bio_free_pages(bio);
block/blk-map.c
337
blk_mq_map_bio_put(bio);
block/blk-map.c
340
static void bio_copy_kern_endio_read(struct bio *bio)
block/blk-map.c
342
char *p = bio->bi_private;
block/blk-map.c
346
bio_for_each_segment_all(bvec, bio, iter_all) {
block/blk-map.c
351
bio_copy_kern_endio(bio);
block/blk-map.c
365
static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
block/blk-map.c
372
struct bio *bio;
block/blk-map.c
383
bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
block/blk-map.c
384
if (!bio)
block/blk-map.c
40
static inline void blk_mq_map_bio_put(struct bio *bio)
block/blk-map.c
401
__bio_add_page(bio, page, bytes, 0);
block/blk-map.c
408
bio->bi_end_io = bio_copy_kern_endio;
block/blk-map.c
410
bio->bi_end_io = bio_copy_kern_endio_read;
block/blk-map.c
411
bio->bi_private = data;
block/blk-map.c
414
return bio;
block/blk-map.c
417
bio_free_pages(bio);
block/blk-map.c
418
blk_mq_map_bio_put(bio);
block/blk-map.c
42
bio_put(bio);
block/blk-map.c
426
int blk_rq_append_bio(struct request *rq, struct bio *bio)
block/blk-map.c
434
ret = bio_split_io_at(bio, lim, &nr_segs, max_bytes, 0);
block/blk-map.c
442
if (rq->bio) {
block/blk-map.c
443
if (!ll_back_merge_fn(rq, bio, nr_segs))
block/blk-map.c
445
rq->phys_gap_bit = bio_seg_gap(rq->q, rq->biotail, bio,
block/blk-map.c
447
rq->biotail->bi_next = bio;
block/blk-map.c
448
rq->biotail = bio;
block/blk-map.c
449
rq->__data_len += bio->bi_iter.bi_size;
block/blk-map.c
45
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
block/blk-map.c
450
bio_crypt_free_ctx(bio);
block/blk-map.c
455
rq->bio = rq->biotail = bio;
block/blk-map.c
456
rq->__data_len = bio->bi_iter.bi_size;
block/blk-map.c
457
rq->phys_gap_bit = bio->bi_bvec_gap_bit;
block/blk-map.c
466
struct bio *bio;
block/blk-map.c
473
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
block/blk-map.c
474
if (!bio)
block/blk-map.c
476
bio_iov_bvec_set(bio, iter);
block/blk-map.c
478
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
480
blk_mq_map_bio_put(bio);
block/blk-map.c
49
struct bio *bio;
block/blk-map.c
505
struct bio *bio = NULL;
block/blk-map.c
51
bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
block/blk-map.c
53
if (!bio)
block/blk-map.c
541
if (!bio)
block/blk-map.c
542
bio = rq->bio;
block/blk-map.c
548
blk_rq_unmap_user(bio);
block/blk-map.c
550
rq->bio = NULL;
block/blk-map.c
56
return bio;
block/blk-map.c
614
int blk_rq_unmap_user(struct bio *bio)
block/blk-map.c
616
struct bio *next_bio;
block/blk-map.c
619
while (bio) {
block/blk-map.c
620
if (bio->bi_private) {
block/blk-map.c
621
ret2 = bio_uncopy_user(bio);
block/blk-map.c
625
bio_release_pages(bio, bio_data_dir(bio) == READ);
block/blk-map.c
628
if (bio_integrity(bio))
block/blk-map.c
629
bio_integrity_unmap_user(bio);
block/blk-map.c
631
next_bio = bio;
block/blk-map.c
632
bio = bio->bi_next;
block/blk-map.c
656
struct bio *bio;
block/blk-map.c
665
bio = bio_copy_kern(rq, kbuf, len, gfp_mask);
block/blk-map.c
667
bio = bio_map_kern(rq, kbuf, len, gfp_mask);
block/blk-map.c
669
if (IS_ERR(bio))
block/blk-map.c
67
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
block/blk-map.c
670
return PTR_ERR(bio);
block/blk-map.c
672
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
674
blk_mq_map_bio_put(bio);
block/blk-map.c
72
bio_for_each_segment_all(bvec, bio, iter_all) {
block/blk-map.c
98
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
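
Most of the blk-map.c entries belong to the user/kernel buffer mapping helpers: bio_map_kern() points a bio straight at the caller's buffer (via bio_add_vmalloc()/bio_add_virt_nofail()), while bio_copy_kern() bounces through freshly allocated pages and copies back on completion for reads. A sketch of that read-side completion, reassembled from the fragments (memcpy_from_bvec() as the per-segment copy helper is an assumption):

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;	/* the caller's kernel buffer */
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* copy every bounce segment back into the original buffer */
	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);	/* frees bounce pages, puts the bio */
}
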
block/blk-merge.c
1001
bio->bi_next = req->bio;
block/blk-merge.c
1002
req->bio = bio;
block/blk-merge.c
1004
req->__sector = bio->bi_iter.bi_sector;
block/blk-merge.c
1005
req->__data_len += bio->bi_iter.bi_size;
block/blk-merge.c
1007
bio_crypt_do_front_merge(req, bio);
block/blk-merge.c
1014
struct request *req, struct bio *bio)
block/blk-merge.c
1020
if (blk_rq_sectors(req) + bio_sectors(bio) >
block/blk-merge.c
1024
rq_qos_merge(q, req, bio);
block/blk-merge.c
1026
req->biotail->bi_next = bio;
block/blk-merge.c
1027
req->biotail = bio;
block/blk-merge.c
1028
req->__data_len += bio->bi_iter.bi_size;
block/blk-merge.c
1040
struct bio *bio,
block/blk-merge.c
1044
if (!blk_rq_merge_ok(rq, bio))
block/blk-merge.c
1047
switch (blk_try_merge(rq, bio)) {
block/blk-merge.c
1049
if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1050
return bio_attempt_back_merge(rq, bio, nr_segs);
block/blk-merge.c
1053
if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1054
return bio_attempt_front_merge(rq, bio, nr_segs);
block/blk-merge.c
1057
return bio_attempt_discard_merge(q, rq, bio);
block/blk-merge.c
1085
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
block/blk-merge.c
1096
return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
block/blk-merge.c
1104
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
block/blk-merge.c
1117
struct bio *bio, unsigned int nr_segs)
block/blk-merge.c
1126
switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
block/blk-merge.c
1141
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
block/blk-merge.c
1146
switch (elv_merge(q, &rq, bio)) {
block/blk-merge.c
1148
if (!blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1150
if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
block/blk-merge.c
1157
if (!blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1159
if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
block/blk-merge.c
1166
return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
block/blk-merge.c
119
struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
block/blk-merge.c
122
struct bio *split = bio_split(bio, split_sectors, GFP_NOIO, bs);
block/blk-merge.c
125
bio->bi_status = errno_to_blk_status(PTR_ERR(split));
block/blk-merge.c
126
bio_endio(bio);
block/blk-merge.c
130
bio_chain(split, bio);
block/blk-merge.c
131
trace_block_split(split, bio->bi_iter.bi_sector);
block/blk-merge.c
132
WARN_ON_ONCE(bio_zone_write_plugging(bio));
block/blk-merge.c
134
if (should_fail_bio(bio))
block/blk-merge.c
135
bio_io_error(bio);
block/blk-merge.c
136
else if (!blk_throtl_bio(bio))
block/blk-merge.c
137
submit_bio_noacct_nocheck(bio, true);
block/blk-merge.c
143
static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
block/blk-merge.c
146
bio->bi_status = errno_to_blk_status(split_sectors);
block/blk-merge.c
147
bio_endio(bio);
block/blk-merge.c
152
bio = bio_submit_split_bioset(bio, split_sectors,
block/blk-merge.c
153
&bio->bi_bdev->bd_disk->bio_split);
block/blk-merge.c
154
if (bio)
block/blk-merge.c
155
bio->bi_opf |= REQ_NOMERGE;
block/blk-merge.c
158
return bio;
block/blk-merge.c
161
static struct bio *__bio_split_discard(struct bio *bio,
block/blk-merge.c
176
return bio;
block/blk-merge.c
178
if (bio_sectors(bio) <= max_discard_sectors)
block/blk-merge.c
179
return bio;
block/blk-merge.c
187
tmp = bio->bi_iter.bi_sector + split_sectors -
block/blk-merge.c
194
return bio_submit_split(bio, split_sectors);
block/blk-merge.c
197
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
block/blk-merge.c
20
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
block/blk-merge.c
202
if (bio_op(bio) == REQ_OP_SECURE_ERASE)
block/blk-merge.c
207
return __bio_split_discard(bio, lim, nsegs, max_sectors);
block/blk-merge.c
22
*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
block/blk-merge.c
231
static inline unsigned get_max_io_size(struct bio *bio,
block/blk-merge.c
236
bool is_atomic = bio->bi_opf & REQ_ATOMIC;
block/blk-merge.c
244
if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
block/blk-merge.c
25
static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
block/blk-merge.c
253
blk_boundary_sectors_left(bio->bi_iter.bi_sector,
block/blk-merge.c
257
start = bio->bi_iter.bi_sector & (pbs - 1);
block/blk-merge.c
27
struct bvec_iter iter = bio->bi_iter;
block/blk-merge.c
30
bio_get_first_bvec(bio, bv);
block/blk-merge.c
31
if (bv->bv_len == bio->bi_iter.bi_size)
block/blk-merge.c
310
static unsigned int bio_split_alignment(struct bio *bio,
block/blk-merge.c
313
if (op_is_write(bio_op(bio)) && lim->zone_write_granularity)
block/blk-merge.c
337
int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
block/blk-merge.c
34
bio_advance_iter(bio, &iter, iter.bi_size);
block/blk-merge.c
340
struct bio_crypt_ctx *bc = bio_crypt_ctx(bio);
block/blk-merge.c
351
bio_for_each_bvec(bv, bio, iter) {
block/blk-merge.c
382
bio->bi_bvec_gap_bit = ffs(gaps);
block/blk-merge.c
385
if (bio->bi_opf & REQ_ATOMIC)
block/blk-merge.c
392
if (bio->bi_opf & REQ_NOWAIT)
block/blk-merge.c
408
bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));
block/blk-merge.c
41
*bv = bio->bi_io_vec[idx];
block/blk-merge.c
417
bio_clear_polled(bio);
block/blk-merge.c
418
bio->bi_bvec_gap_bit = ffs(gaps);
block/blk-merge.c
423
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
block/blk-merge.c
426
return bio_submit_split(bio,
block/blk-merge.c
427
bio_split_rw_at(bio, lim, nr_segs,
block/blk-merge.c
428
get_max_io_size(bio, lim) << SECTOR_SHIFT));
block/blk-merge.c
438
struct bio *bio_split_zone_append(struct bio *bio,
block/blk-merge.c
443
split_sectors = bio_split_rw_at(bio, lim, nr_segs,
block/blk-merge.c
447
return bio_submit_split(bio, split_sectors);
block/blk-merge.c
450
struct bio *bio_split_write_zeroes(struct bio *bio,
block/blk-merge.c
453
unsigned int max_sectors = get_max_io_size(bio, lim);
block/blk-merge.c
464
return bio;
block/blk-merge.c
465
if (bio_sectors(bio) <= max_sectors)
block/blk-merge.c
466
return bio;
block/blk-merge.c
467
return bio_submit_split(bio, max_sectors);
block/blk-merge.c
481
struct bio *bio_split_to_limits(struct bio *bio)
block/blk-merge.c
485
return __bio_split_to_limits(bio, bdev_limits(bio->bi_bdev), &nr_segs);
block/blk-merge.c
496
if (!rq->bio)
block/blk-merge.c
499
switch (bio_op(rq->bio)) {
block/blk-merge.c
503
struct bio *bio = rq->bio;
block/blk-merge.c
505
for_each_bio(bio)
block/blk-merge.c
52
struct request *prev_rq, struct bio *prev, struct bio *next)
block/blk-merge.c
544
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
block/blk-merge.c
547
if (!blk_cgroup_mergeable(req, bio))
block/blk-merge.c
550
if (blk_integrity_merge_bio(req->q, req, bio) == false)
block/blk-merge.c
565
if (bio_integrity(bio))
block/blk-merge.c
567
bio);
block/blk-merge.c
575
int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
block/blk-merge.c
577
if (req_gap_back_merge(req, bio))
block/blk-merge.c
580
integrity_req_gap_back_merge(req, bio))
block/blk-merge.c
582
if (!bio_crypt_ctx_back_mergeable(req, bio))
block/blk-merge.c
584
if (blk_rq_sectors(req) + bio_sectors(bio) >
block/blk-merge.c
590
return ll_new_hw_segment(req, bio, nr_segs);
block/blk-merge.c
593
static int ll_front_merge_fn(struct request *req, struct bio *bio,
block/blk-merge.c
596
if (req_gap_front_merge(req, bio))
block/blk-merge.c
599
integrity_req_gap_front_merge(req, bio))
block/blk-merge.c
601
if (!bio_crypt_ctx_front_mergeable(req, bio))
block/blk-merge.c
603
if (blk_rq_sectors(req) + bio_sectors(bio) >
block/blk-merge.c
604
blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
block/blk-merge.c
609
return ll_new_hw_segment(req, bio, nr_segs);
block/blk-merge.c
619
if (blk_rq_sectors(req) + bio_sectors(next->bio) >
block/blk-merge.c
635
if (req_gap_back_merge(req, next->bio))
block/blk-merge.c
649
if (!blk_cgroup_mergeable(req, next->bio))
block/blk-merge.c
65
bio_get_first_bvec(prev_rq->bio, &pb);
block/blk-merge.c
676
struct bio *bio;
block/blk-merge.c
686
for (bio = rq->bio; bio; bio = bio->bi_next) {
block/blk-merge.c
687
WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
block/blk-merge.c
688
(bio->bi_opf & REQ_FAILFAST_MASK) != ff);
block/blk-merge.c
689
bio->bi_opf |= ff;
block/blk-merge.c
694
static inline blk_opf_t bio_failfast(const struct bio *bio)
block/blk-merge.c
696
if (bio->bi_opf & REQ_RAHEAD)
block/blk-merge.c
699
return bio->bi_opf & REQ_FAILFAST_MASK;
block/blk-merge.c
708
struct bio *bio, bool front_merge)
block/blk-merge.c
711
if (bio->bi_opf & REQ_RAHEAD)
block/blk-merge.c
712
bio->bi_opf |= REQ_FAILFAST_MASK;
block/blk-merge.c
716
req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
block/blk-merge.c
744
struct bio *bio)
block/blk-merge.c
746
return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
block/blk-merge.c
755
u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
block/blk-merge.c
786
if (req->bio->bi_write_hint != next->bio->bi_write_hint)
block/blk-merge.c
788
if (req->bio->bi_write_stream != next->bio->bi_write_stream)
block/blk-merge.c
790
if (req->bio->bi_ioprio != next->bio->bi_ioprio)
block/blk-merge.c
837
req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, next->bio,
block/blk-merge.c
840
req->biotail->bi_next = next->bio;
block/blk-merge.c
861
next->bio = NULL;
block/blk-merge.c
87
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
block/blk-merge.c
89
return bio_will_gap(req->q, req, req->biotail, bio);
block/blk-merge.c
898
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
block/blk-merge.c
900
if (!rq_mergeable(rq) || !bio_mergeable(bio))
block/blk-merge.c
903
if (req_op(rq) != bio_op(bio))
block/blk-merge.c
906
if (!blk_cgroup_mergeable(rq, bio))
block/blk-merge.c
908
if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
block/blk-merge.c
910
if (!bio_crypt_rq_ctx_compatible(rq, bio))
block/blk-merge.c
912
if (rq->bio->bi_write_hint != bio->bi_write_hint)
block/blk-merge.c
914
if (rq->bio->bi_write_stream != bio->bi_write_stream)
block/blk-merge.c
916
if (rq->bio->bi_ioprio != bio->bi_ioprio)
block/blk-merge.c
918
if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
block/blk-merge.c
92
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
block/blk-merge.c
924
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
block/blk-merge.c
928
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
block/blk-merge.c
930
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
block/blk-merge.c
94
return bio_will_gap(req->q, NULL, bio, req->bio);
block/blk-merge.c
945
struct bio *bio, unsigned int nr_segs)
block/blk-merge.c
947
const blk_opf_t ff = bio_failfast(bio);
block/blk-merge.c
949
if (!ll_back_merge_fn(req, bio, nr_segs))
block/blk-merge.c
952
trace_block_bio_backmerge(bio);
block/blk-merge.c
953
rq_qos_merge(req->q, req, bio);
block/blk-merge.c
958
blk_update_mixed_merge(req, bio, false);
block/blk-merge.c
961
blk_zone_write_plug_bio_merged(bio);
block/blk-merge.c
963
req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, bio,
block/blk-merge.c
965
req->biotail->bi_next = bio;
block/blk-merge.c
966
req->biotail = bio;
block/blk-merge.c
967
req->__data_len += bio->bi_iter.bi_size;
block/blk-merge.c
969
bio_crypt_free_ctx(bio);
block/blk-merge.c
976
struct bio *bio, unsigned int nr_segs)
block/blk-merge.c
978
const blk_opf_t ff = bio_failfast(bio);
block/blk-merge.c
988
if (!ll_front_merge_fn(req, bio, nr_segs))
block/blk-merge.c
991
trace_block_bio_frontmerge(bio);
block/blk-merge.c
992
rq_qos_merge(req->q, req, bio);
block/blk-merge.c
997
blk_update_mixed_merge(req, bio, true);
block/blk-merge.c
999
req->phys_gap_bit = bio_seg_gap(req->q, bio, req->bio,
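
The bio_submit_split_bioset() fragments above capture the canonical split-and-resubmit pattern shared by the bio_split_*() variants: carve the in-limits head off with bio_split(), chain the head to the oversized remainder so the parent only completes once both are done, then resubmit the remainder. A sketch under the fragments' names (error handling shortened):

	struct bio *split = bio_split(bio, split_sectors, GFP_NOIO, bs);

	if (IS_ERR(split)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(split));
		bio_endio(bio);
		return NULL;
	}

	/* the remainder ('bio') becomes the parent; it completes only
	 * after the split-off head has completed */
	bio_chain(split, bio);
	trace_block_split(split, bio->bi_iter.bi_sector);

	/* requeue the remainder, hand the head back to the caller */
	if (!blk_throtl_bio(bio))
		submit_bio_noacct_nocheck(bio, true);
	return split;
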
block/blk-mq-dma.c
13
if (!iter->bio || !iter->bio->bi_next)
block/blk-mq-dma.c
143
struct bio *bio = rq->bio;
block/blk-mq-dma.c
152
} else if (bio) {
block/blk-mq-dma.c
154
.bio = bio,
block/blk-mq-dma.c
155
.bvecs = bio->bi_io_vec,
block/blk-mq-dma.c
156
.iter = bio->bi_iter,
block/blk-mq-dma.c
16
iter->bio = iter->bio->bi_next;
block/blk-mq-dma.c
18
iter->iter = bio_integrity(iter->bio)->bip_iter;
block/blk-mq-dma.c
19
iter->bvecs = bio_integrity(iter->bio)->bip_vec;
block/blk-mq-dma.c
21
iter->iter = iter->bio->bi_iter;
block/blk-mq-dma.c
22
iter->bvecs = iter->bio->bi_io_vec;
block/blk-mq-dma.c
347
struct bio *bio = req->bio;
block/blk-mq-dma.c
350
.bio = bio,
block/blk-mq-dma.c
351
.iter = bio_integrity(bio)->bip_iter,
block/blk-mq-dma.c
352
.bvecs = bio_integrity(bio)->bip_vec,
block/blk-mq-dma.c
406
struct bio *bio = rq->bio;
block/blk-mq-dma.c
411
.bio = bio,
block/blk-mq-dma.c
412
.iter = bio_integrity(bio)->bip_iter,
block/blk-mq-dma.c
413
.bvecs = bio_integrity(bio)->bip_vec,
block/blk-mq-sched.c
335
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
block/blk-mq-sched.c
345
ret = e->type->ops.bio_merge(q, bio, nr_segs);
block/blk-mq-sched.c
350
hctx = blk_mq_map_queue(bio->bi_opf, ctx);
block/blk-mq-sched.c
362
if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
block/blk-mq-sched.h
10
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
block/blk-mq-sched.h
12
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
block/blk-mq-sched.h
75
static inline bool bio_mergeable(struct bio *bio)
block/blk-mq-sched.h
77
return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
block/blk-mq-sched.h
82
struct bio *bio)
block/blk-mq-sched.h
88
return e->type->ops.allow_merge(q, rq, bio);
block/blk-mq.c
1001
bio->bi_status = BLK_STS_IOERR;
block/blk-mq.c
1005
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
block/blk-mq.c
1007
bio_set_flag(bio, BIO_QUIET);
block/blk-mq.c
1009
bio_advance(bio, bio_bytes);
block/blk-mq.c
1012
if (!bio->bi_iter.bi_size) {
block/blk-mq.c
1013
if (blk_req_bio_is_zone_append(req, bio))
block/blk-mq.c
1014
blk_zone_append_update_request_bio(req, bio);
block/blk-mq.c
1016
bio_endio(bio);
block/blk-mq.c
1029
if (!req->bio) {
block/blk-mq.c
1048
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
block/blk-mq.c
1093
struct bio *bio = req->bio;
block/blk-mq.c
1099
if (!bio)
block/blk-mq.c
1107
if (!bio->bi_bdev)
block/blk-mq.c
1116
if (blk_rq_bytes(req) & (bdev_logical_block_size(bio->bi_bdev) - 1))
block/blk-mq.c
1139
if (req->bio)
block/blk-mq.c
1140
req->part = req->bio->bi_bdev;
block/blk-mq.c
1208
prefetch(rq->bio);
block/blk-mq.c
1391
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
block/blk-mq.c
1392
WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
block/blk-mq.c
2685
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
block/blk-mq.c
2690
if (bio->bi_opf & REQ_RAHEAD)
block/blk-mq.c
2693
rq->bio = rq->biotail = bio;
block/blk-mq.c
2694
rq->__sector = bio->bi_iter.bi_sector;
block/blk-mq.c
2695
rq->__data_len = bio->bi_iter.bi_size;
block/blk-mq.c
2696
rq->phys_gap_bit = bio->bi_bvec_gap_bit;
block/blk-mq.c
2699
if (bio_integrity(bio))
block/blk-mq.c
2701
bio);
block/blk-mq.c
2704
err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
block/blk-mq.c
3035
struct bio *bio, unsigned int nr_segs)
block/blk-mq.c
3037
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
block/blk-mq.c
3038
if (blk_attempt_plug_merge(q, bio, nr_segs))
block/blk-mq.c
3040
if (blk_mq_sched_bio_merge(q, bio, nr_segs))
block/blk-mq.c
3048
struct bio *bio)
block/blk-mq.c
3054
.cmd_flags = bio->bi_opf,
block/blk-mq.c
3063
rq_qos_throttle(q, bio);
block/blk-mq.c
3073
rq_qos_cleanup(q, bio);
block/blk-mq.c
3100
struct bio *bio)
block/blk-mq.c
3110
rq_qos_throttle(rq->q, bio);
block/blk-mq.c
3113
rq->cmd_flags = bio->bi_opf;
block/blk-mq.c
3117
static bool bio_unaligned(const struct bio *bio, struct request_queue *q)
block/blk-mq.c
3122
if ((bio->bi_iter.bi_size & bs_mask) ||
block/blk-mq.c
3123
((bio->bi_iter.bi_sector << SECTOR_SHIFT) & bs_mask))
block/blk-mq.c
3141
void blk_mq_submit_bio(struct bio *bio)
block/blk-mq.c
3143
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk-mq.c
3145
const int is_sync = op_is_sync(bio->bi_opf);
block/blk-mq.c
3154
rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
block/blk-mq.c
3162
if (bio_zone_write_plugging(bio)) {
block/blk-mq.c
3163
nr_segs = bio->__bi_nr_segments;
block/blk-mq.c
3174
if (unlikely(bio_queue_enter(bio)))
block/blk-mq.c
3183
if (unlikely(bio_unaligned(bio, q))) {
block/blk-mq.c
3184
bio_io_error(bio);
block/blk-mq.c
3188
if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
block/blk-mq.c
3189
bio->bi_status = BLK_STS_NOTSUPP;
block/blk-mq.c
3190
bio_endio(bio);
block/blk-mq.c
3194
bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
block/blk-mq.c
3195
if (!bio)
block/blk-mq.c
3198
if (!bio_integrity_prep(bio))
block/blk-mq.c
3201
blk_mq_bio_issue_init(q, bio);
block/blk-mq.c
3202
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
block/blk-mq.c
3205
if (bio_needs_zone_write_plugging(bio)) {
block/blk-mq.c
3206
if (blk_zone_plug_bio(bio, nr_segs))
block/blk-mq.c
3212
blk_mq_use_cached_rq(rq, plug, bio);
block/blk-mq.c
3214
rq = blk_mq_get_new_requests(q, plug, bio);
block/blk-mq.c
3216
if (bio->bi_opf & REQ_NOWAIT)
block/blk-mq.c
3217
bio_wouldblock_error(bio);
block/blk-mq.c
3222
trace_block_getrq(bio);
block/blk-mq.c
3224
rq_qos_track(q, rq, bio);
block/blk-mq.c
3226
blk_mq_bio_to_request(rq, bio, nr_segs);
block/blk-mq.c
3230
bio->bi_status = ret;
block/blk-mq.c
3231
bio_endio(bio);
block/blk-mq.c
3236
if (bio_zone_write_plugging(bio))
block/blk-mq.c
3239
if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
block/blk-mq.c
3339
struct bio *bio;
block/blk-mq.c
3341
while ((bio = rq->bio) != NULL) {
block/blk-mq.c
3342
rq->bio = bio->bi_next;
block/blk-mq.c
3344
bio_put(bio);
block/blk-mq.c
3368
int (*bio_ctr)(struct bio *, struct bio *, void *),
block/blk-mq.c
3371
struct bio *bio_src;
block/blk-mq.c
3377
struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
block/blk-mq.c
3379
if (!bio)
block/blk-mq.c
3382
if (bio_ctr && bio_ctr(bio, bio_src, data)) {
block/blk-mq.c
3383
bio_put(bio);
block/blk-mq.c
3387
if (rq->bio) {
block/blk-mq.c
3388
rq->biotail->bi_next = bio;
block/blk-mq.c
3389
rq->biotail = bio;
block/blk-mq.c
3391
rq->bio = rq->biotail = bio;
block/blk-mq.c
3406
if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
block/blk-mq.c
3425
if (rq->bio) {
block/blk-mq.c
3427
list->tail->bi_next = rq->bio;
block/blk-mq.c
3429
list->head = rq->bio;
block/blk-mq.c
3432
rq->bio = NULL;
block/blk-mq.c
402
struct bio *bio)
block/blk-mq.c
406
bio->issue_time_ns = blk_time_get_ns();
block/blk-mq.c
692
rq->bio = rq->biotail = NULL;
block/blk-mq.c
773
rq->bio = rq->biotail = NULL;
block/blk-mq.c
852
rq->bio, rq->biotail, blk_rq_bytes(rq));
block/blk-mq.c
889
struct bio *bio = req->bio;
block/blk-mq.c
893
if (!bio)
block/blk-mq.c
908
struct bio *next = bio->bi_next;
block/blk-mq.c
911
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
block/blk-mq.c
913
if (blk_req_bio_is_zone_append(req, bio))
block/blk-mq.c
914
blk_zone_append_update_request_bio(req, bio);
block/blk-mq.c
917
bio_endio(bio);
block/blk-mq.c
918
bio = next;
block/blk-mq.c
919
} while (bio);
block/blk-mq.c
927
req->bio = NULL;
block/blk-mq.c
963
if (!req->bio)
block/blk-mq.c
986
while (req->bio) {
block/blk-mq.c
987
struct bio *bio = req->bio;
block/blk-mq.c
988
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
block/blk-mq.c
991
bio->bi_status = error;
block/blk-mq.c
993
if (bio_bytes == bio->bi_iter.bi_size) {
block/blk-mq.c
994
req->bio = bio->bi_next;
block/blk-mq.c
995
} else if (bio_is_zone_append(bio) && error == BLK_STS_OK) {
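
The blk-mq.c completion fragments (source lines 889-927) reduce to a simple walk: pop each bio off the request's singly linked bi_next chain, let zone append rewrite the bio's sector if needed, then end it. A sketch reassembled from those lines:

	struct bio *bio = req->bio;

	if (!bio)
		return;
	do {
		struct bio *next = bio->bi_next;

		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		if (blk_req_bio_is_zone_append(req, bio))
			blk_zone_append_update_request_bio(req, bio);
		bio_endio(bio);
		bio = next;
	} while (bio);
	req->bio = NULL;
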
block/blk-mq.h
45
void blk_mq_submit_bio(struct bio *bio);
block/blk-rq-qos.c
26
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
block/blk-rq-qos.c
30
rqos->ops->cleanup(rqos, bio);
block/blk-rq-qos.c
62
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
block/blk-rq-qos.c
66
rqos->ops->throttle(rqos, bio);
block/blk-rq-qos.c
71
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
block/blk-rq-qos.c
75
rqos->ops->track(rqos, rq, bio);
block/blk-rq-qos.c
80
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
block/blk-rq-qos.c
84
rqos->ops->merge(rqos, rq, bio);
block/blk-rq-qos.c
89
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
block/blk-rq-qos.c
93
rqos->ops->done_bio(rqos, bio);
block/blk-rq-qos.h
103
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
block/blk-rq-qos.h
107
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
block/blk-rq-qos.h
108
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
block/blk-rq-qos.h
109
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
block/blk-rq-qos.h
110
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
block/blk-rq-qos.h
113
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
block/blk-rq-qos.h
116
__rq_qos_cleanup(q->rq_qos, bio);
block/blk-rq-qos.h
138
static inline void rq_qos_done_bio(struct bio *bio)
block/blk-rq-qos.h
142
if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
block/blk-rq-qos.h
143
!bio_flagged(bio, BIO_QOS_MERGED)))
block/blk-rq-qos.h
146
q = bdev_get_queue(bio->bi_bdev);
block/blk-rq-qos.h
157
__rq_qos_done_bio(q->rq_qos, bio);
block/blk-rq-qos.h
160
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
block/blk-rq-qos.h
163
bio_set_flag(bio, BIO_QOS_THROTTLED);
block/blk-rq-qos.h
164
__rq_qos_throttle(q->rq_qos, bio);
block/blk-rq-qos.h
169
struct bio *bio)
block/blk-rq-qos.h
172
__rq_qos_track(q->rq_qos, rq, bio);
block/blk-rq-qos.h
176
struct bio *bio)
block/blk-rq-qos.h
179
bio_set_flag(bio, BIO_QOS_MERGED);
block/blk-rq-qos.h
180
__rq_qos_merge(q->rq_qos, rq, bio);
block/blk-rq-qos.h
38
void (*throttle)(struct rq_qos *, struct bio *);
block/blk-rq-qos.h
39
void (*track)(struct rq_qos *, struct request *, struct bio *);
block/blk-rq-qos.h
40
void (*merge)(struct rq_qos *, struct request *, struct bio *);
block/blk-rq-qos.h
44
void (*done_bio)(struct rq_qos *, struct bio *);
block/blk-rq-qos.h
45
void (*cleanup)(struct rq_qos *, struct bio *);
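
The rq-qos entries show a thin dispatcher: each hook in struct rq_qos_ops is invoked through the __rq_qos_*() walkers, and the inline wrappers gate entry by flagging the bio (BIO_QOS_THROTTLED / BIO_QOS_MERGED) so that rq_qos_done_bio() only re-enters for bios a policy actually saw. A hypothetical policy skeleton (my_qos_* are illustrative names, not kernel symbols):

static void my_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	/* called before the bio becomes a request; may sleep/account */
}

static void my_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	/* called from bio completion for throttled or merged bios */
}

static const struct rq_qos_ops my_qos_ops = {
	.throttle	= my_qos_throttle,
	.done_bio	= my_qos_done_bio,
};
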
block/blk-throttle.c
1002
struct bio *bio;
block/blk-throttle.c
1010
bio = throtl_pop_queued(sq, &tg_to_put, rw);
block/blk-throttle.c
1012
throtl_charge_iops_bio(tg, bio);
block/blk-throttle.c
1022
throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
block/blk-throttle.c
1025
bio_set_flag(bio, BIO_BPS_THROTTLED);
block/blk-throttle.c
1026
throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
block/blk-throttle.c
1044
struct bio *bio;
block/blk-throttle.c
1048
while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
block/blk-throttle.c
1049
tg_dispatch_time(tg, bio) == 0) {
block/blk-throttle.c
1058
while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
block/blk-throttle.c
1059
tg_dispatch_time(tg, bio) == 0) {
block/blk-throttle.c
1205
struct bio *bio;
block/blk-throttle.c
1213
while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
block/blk-throttle.c
1214
bio_list_add(&bio_list_on_stack, bio);
block/blk-throttle.c
1219
while ((bio = bio_list_pop(&bio_list_on_stack)))
block/blk-throttle.c
1220
submit_bio_noacct_nocheck(bio, false);
block/blk-throttle.c
128
static inline unsigned int throtl_bio_data_size(struct bio *bio)
block/blk-throttle.c
131
if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
block/blk-throttle.c
133
return bio->bi_iter.bi_size;
block/blk-throttle.c
154
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
block/blk-throttle.c
157
bool rw = bio_data_dir(bio);
block/blk-throttle.c
163
if (bio_flagged(bio, BIO_TG_BPS_THROTTLED) ||
block/blk-throttle.c
164
bio_flagged(bio, BIO_BPS_THROTTLED)) {
block/blk-throttle.c
165
bio_list_add(&qn->bios_iops, bio);
block/blk-throttle.c
168
bio_list_add(&qn->bios_bps, bio);
block/blk-throttle.c
1702
static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
block/blk-throttle.c
1710
if (bio_flagged(bio, BIO_BPS_THROTTLED))
block/blk-throttle.c
1712
tg_dispatch_iops_time(tg, bio) == 0;
block/blk-throttle.c
1721
tg_dispatch_bps_time(tg, bio) == 0)
block/blk-throttle.c
1722
throtl_charge_bps_bio(tg, bio);
block/blk-throttle.c
1727
return tg_dispatch_time(tg, bio) == 0;
block/blk-throttle.c
1730
bool __blk_throtl_bio(struct bio *bio)
block/blk-throttle.c
1732
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk-throttle.c
1733
struct blkcg_gq *blkg = bio->bi_blkg;
block/blk-throttle.c
1737
bool rw = bio_data_dir(bio);
block/blk-throttle.c
1746
if (tg_within_limit(tg, bio, rw)) {
block/blk-throttle.c
1748
throtl_charge_iops_bio(tg, bio);
block/blk-throttle.c
1762
} else if (bio_issue_as_root_blkg(bio)) {
block/blk-throttle.c
1771
throtl_charge_bps_bio(tg, bio);
block/blk-throttle.c
1772
throtl_charge_iops_bio(tg, bio);
block/blk-throttle.c
1787
bio_set_flag(bio, BIO_BPS_THROTTLED);
block/blk-throttle.c
1795
tg->bytes_disp[rw], bio->bi_iter.bi_size,
block/blk-throttle.c
1801
throtl_add_bio_tg(bio, qn, tg);
block/blk-throttle.c
186
static struct bio *throtl_peek_queued(struct list_head *queued)
block/blk-throttle.c
189
struct bio *bio;
block/blk-throttle.c
195
bio = bio_list_peek(&qn->bios_iops);
block/blk-throttle.c
196
if (!bio)
block/blk-throttle.c
197
bio = bio_list_peek(&qn->bios_bps);
block/blk-throttle.c
198
WARN_ON_ONCE(!bio);
block/blk-throttle.c
199
return bio;
block/blk-throttle.c
218
static struct bio *throtl_pop_queued(struct throtl_service_queue *sq,
block/blk-throttle.c
223
struct bio *bio;
block/blk-throttle.c
229
bio = bio_list_pop(&qn->bios_iops);
block/blk-throttle.c
230
if (bio) {
block/blk-throttle.c
233
bio = bio_list_pop(&qn->bios_bps);
block/blk-throttle.c
234
if (bio)
block/blk-throttle.c
237
WARN_ON_ONCE(!bio);
block/blk-throttle.c
249
return bio;
block/blk-throttle.c
759
static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
block/blk-throttle.c
762
bool rw = bio_data_dir(bio);
block/blk-throttle.c
782
static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
block/blk-throttle.c
785
bool rw = bio_data_dir(bio);
block/blk-throttle.c
789
unsigned int bio_size = throtl_bio_data_size(bio);
block/blk-throttle.c
819
static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
block/blk-throttle.c
821
unsigned int bio_size = throtl_bio_data_size(bio);
block/blk-throttle.c
824
if (!bio_flagged(bio, BIO_BPS_THROTTLED) &&
block/blk-throttle.c
825
!bio_flagged(bio, BIO_TG_BPS_THROTTLED)) {
block/blk-throttle.c
826
bio_set_flag(bio, BIO_TG_BPS_THROTTLED);
block/blk-throttle.c
827
tg->bytes_disp[bio_data_dir(bio)] += bio_size;
block/blk-throttle.c
831
static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
block/blk-throttle.c
833
bio_clear_flag(bio, BIO_TG_BPS_THROTTLED);
block/blk-throttle.c
834
tg->io_disp[bio_data_dir(bio)]++;
block/blk-throttle.c
852
static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio)
block/blk-throttle.c
854
bool rw = bio_data_dir(bio);
block/blk-throttle.c
860
bio_flagged(bio, BIO_BPS_THROTTLED) ||
block/blk-throttle.c
861
bio_flagged(bio, BIO_TG_BPS_THROTTLED))
block/blk-throttle.c
865
bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
block/blk-throttle.c
871
static unsigned long tg_dispatch_iops_time(struct throtl_grp *tg, struct bio *bio)
block/blk-throttle.c
873
bool rw = bio_data_dir(bio);
block/blk-throttle.c
881
iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
block/blk-throttle.c
891
static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
block/blk-throttle.c
893
bool rw = bio_data_dir(bio);
block/blk-throttle.c
903
bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
block/blk-throttle.c
905
wait = tg_dispatch_bps_time(tg, bio);
block/blk-throttle.c
913
throtl_charge_bps_bio(tg, bio);
block/blk-throttle.c
915
return tg_dispatch_iops_time(tg, bio);
block/blk-throttle.c
927
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
block/blk-throttle.c
931
bool rw = bio_data_dir(bio);
block/blk-throttle.c
945
throtl_qnode_add_bio(bio, qn, sq);
block/blk-throttle.c
952
if (bio_flagged(bio, BIO_BPS_THROTTLED) &&
block/blk-throttle.c
953
bio == throtl_peek_queued(&sq->queued[rw]))
block/blk-throttle.c
963
struct bio *bio;
block/blk-throttle.c
965
bio = throtl_peek_queued(&sq->queued[READ]);
block/blk-throttle.c
966
if (bio)
block/blk-throttle.c
967
read_wait = tg_dispatch_time(tg, bio);
block/blk-throttle.c
969
bio = throtl_peek_queued(&sq->queued[WRITE]);
block/blk-throttle.c
970
if (bio)
block/blk-throttle.c
971
write_wait = tg_dispatch_time(tg, bio);
block/blk-throttle.h
150
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
block/blk-throttle.h
154
bool __blk_throtl_bio(struct bio *bio);
block/blk-throttle.h
168
static inline bool blk_should_throtl(struct bio *bio)
block/blk-throttle.h
171
int rw = bio_data_dir(bio);
block/blk-throttle.h
173
if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
block/blk-throttle.h
176
tg = blkg_to_tg(bio->bi_blkg);
block/blk-throttle.h
178
if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
block/blk-throttle.h
179
bio_set_flag(bio, BIO_CGROUP_ACCT);
block/blk-throttle.h
180
blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
block/blk-throttle.h
181
bio->bi_iter.bi_size);
block/blk-throttle.h
183
blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
block/blk-throttle.h
190
if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
block/blk-throttle.h
196
static inline bool blk_throtl_bio(struct bio *bio)
block/blk-throttle.h
202
if (!blk_should_throtl(bio))
block/blk-throttle.h
205
return __blk_throtl_bio(bio);
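
blk-throttle keeps two bio lists per qnode (bios_iops and bios_bps, peeked in that order) and reissues deferred bios from timer context. The drain pattern from the fragments at source lines 1205-1220 first moves everything onto an on-stack list so resubmission can happen outside the queue lock; a sketch:

	struct bio_list bio_list_on_stack;
	struct bio *bio;

	bio_list_init(&bio_list_on_stack);

	/* collect under the lock ... */
	while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
		bio_list_add(&bio_list_on_stack, bio);

	/* ... then reissue outside it */
	while ((bio = bio_list_pop(&bio_list_on_stack)))
		submit_bio_noacct_nocheck(bio, false);
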
block/blk-wbt.c
601
static inline bool wbt_should_throttle(struct bio *bio)
block/blk-wbt.c
603
switch (bio_op(bio)) {
block/blk-wbt.c
608
if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
block/blk-wbt.c
619
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
block/blk-wbt.c
626
if (bio_op(bio) == REQ_OP_READ) {
block/blk-wbt.c
628
} else if (wbt_should_throttle(bio)) {
block/blk-wbt.c
629
if (bio->bi_opf & REQ_SWAP)
block/blk-wbt.c
631
if (bio_op(bio) == REQ_OP_DISCARD)
block/blk-wbt.c
638
static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
block/blk-wbt.c
641
enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
block/blk-wbt.c
646
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
block/blk-wbt.c
651
flags = bio_to_wbt_flags(rwb, bio);
block/blk-wbt.c
658
__wbt_wait(rwb, flags, bio->bi_opf);
block/blk-wbt.c
664
static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
block/blk-wbt.c
667
rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
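
The writeback-throttling fragments classify each bio before deciding whether to wait: reads pass, and only writes not marked both REQ_SYNC and REQ_IDLE (plus discards) are throttled. A reconstruction of that predicate from the fragments (the exact case list is an assumption from memory):

static inline bool wbt_should_throttle(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/* REQ_SYNC | REQ_IDLE marks O_DIRECT/fsync-style writes,
		 * which are not background writeback */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		fallthrough;
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}
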
block/blk-zoned.c
1091
static void blk_zone_reset_bio_endio(struct bio *bio)
block/blk-zoned.c
1093
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1094
sector_t sector = bio->bi_iter.bi_sector;
block/blk-zoned.c
1116
static void blk_zone_reset_all_bio_endio(struct bio *bio)
block/blk-zoned.c
1118
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1142
sector += bdev_zone_sectors(bio->bi_bdev))
block/blk-zoned.c
1147
static void blk_zone_finish_bio_endio(struct bio *bio)
block/blk-zoned.c
1149
struct block_device *bdev = bio->bi_bdev;
block/blk-zoned.c
1151
sector_t sector = bio->bi_iter.bi_sector;
block/blk-zoned.c
1174
void blk_zone_mgmt_bio_endio(struct bio *bio)
block/blk-zoned.c
1177
if (bio->bi_status != BLK_STS_OK)
block/blk-zoned.c
1180
switch (bio_op(bio)) {
block/blk-zoned.c
1182
blk_zone_reset_bio_endio(bio);
block/blk-zoned.c
1185
blk_zone_reset_all_bio_endio(bio);
block/blk-zoned.c
1188
blk_zone_finish_bio_endio(bio);
block/blk-zoned.c
1212
struct bio *bio, unsigned int nr_segs)
block/blk-zoned.c
1220
percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter);
block/blk-zoned.c
1227
bio_clear_polled(bio);
block/blk-zoned.c
1233
bio->__bi_nr_segments = nr_segs;
block/blk-zoned.c
1241
bio_list_add(&zwplug->bio_list, bio);
block/blk-zoned.c
1243
bio->bi_iter.bi_sector, bio_sectors(bio));
block/blk-zoned.c
1249
void blk_zone_write_plug_bio_merged(struct bio *bio)
block/blk-zoned.c
1251
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1262
if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
block/blk-zoned.c
1265
bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
block/blk-zoned.c
1273
zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
block/blk-zoned.c
1278
zwplug->wp_offset += bio_sectors(bio);
block/blk-zoned.c
1296
struct bio *bio;
block/blk-zoned.c
1317
bio = bio_list_peek(&zwplug->bio_list);
block/blk-zoned.c
1318
if (!bio)
block/blk-zoned.c
1321
if (bio->bi_iter.bi_sector != req_back_sector ||
block/blk-zoned.c
1322
!blk_rq_merge_ok(req, bio))
block/blk-zoned.c
1325
WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES &&
block/blk-zoned.c
1326
!bio->__bi_nr_segments);
block/blk-zoned.c
1329
if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) !=
block/blk-zoned.c
1331
bio_list_add_head(&zwplug->bio_list, bio);
block/blk-zoned.c
1337
zwplug->wp_offset += bio_sectors(bio);
block/blk-zoned.c
1340
req_back_sector += bio_sectors(bio);
block/blk-zoned.c
1351
struct bio *bio)
block/blk-zoned.c
1353
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1375
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
block/blk-zoned.c
1381
bio->bi_opf &= ~REQ_OP_MASK;
block/blk-zoned.c
1382
bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE;
block/blk-zoned.c
1383
bio->bi_iter.bi_sector += zwplug->wp_offset;
block/blk-zoned.c
1389
bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
block/blk-zoned.c
1396
if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
block/blk-zoned.c
1401
zwplug->wp_offset += bio_sectors(bio);
block/blk-zoned.c
1407
static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
block/blk-zoned.c
1409
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1410
sector_t sector = bio->bi_iter.bi_sector;
block/blk-zoned.c
1423
if (WARN_ON_ONCE(bio_straddles_zones(bio))) {
block/blk-zoned.c
1424
bio_io_error(bio);
block/blk-zoned.c
1429
if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
block/blk-zoned.c
1431
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
block/blk-zoned.c
1432
bio_io_error(bio);
block/blk-zoned.c
1438
if (bio->bi_opf & REQ_NOWAIT)
block/blk-zoned.c
1443
if (bio->bi_opf & REQ_NOWAIT)
block/blk-zoned.c
1444
bio_wouldblock_error(bio);
block/blk-zoned.c
1446
bio_io_error(bio);
block/blk-zoned.c
1451
bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
block/blk-zoned.c
1457
if (bio->bi_opf & REQ_NOWAIT) {
block/blk-zoned.c
1458
bio->bi_opf &= ~REQ_NOWAIT;
block/blk-zoned.c
1466
if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
block/blk-zoned.c
1468
bio_io_error(bio);
block/blk-zoned.c
1480
disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);
block/blk-zoned.c
1492
static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
block/blk-zoned.c
1494
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1509
zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
block/blk-zoned.c
1536
static bool blk_zone_wplug_handle_zone_mgmt(struct bio *bio)
block/blk-zoned.c
1538
if (bio_op(bio) != REQ_OP_ZONE_RESET_ALL &&
block/blk-zoned.c
1539
!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {
block/blk-zoned.c
1544
bio_io_error(bio);
block/blk-zoned.c
1554
if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
block/blk-zoned.c
1555
bio->bi_opf &= ~REQ_NOWAIT;
block/blk-zoned.c
1572
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
block/blk-zoned.c
1574
struct block_device *bdev = bio->bi_bdev;
block/blk-zoned.c
1596
switch (bio_op(bio)) {
block/blk-zoned.c
1599
blk_zone_wplug_handle_native_zone_append(bio);
block/blk-zoned.c
1605
return blk_zone_wplug_handle_write(bio, nr_segs);
block/blk-zoned.c
1609
return blk_zone_wplug_handle_zone_mgmt(bio);
block/blk-zoned.c
1644
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio)
block/blk-zoned.c
1654
bio->bi_iter.bi_sector = rq->__sector;
block/blk-zoned.c
1658
void blk_zone_write_plug_bio_endio(struct bio *bio)
block/blk-zoned.c
1660
struct gendisk *disk = bio->bi_bdev->bd_disk;
block/blk-zoned.c
1662
disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
block/blk-zoned.c
1669
bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
block/blk-zoned.c
1675
if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
block/blk-zoned.c
1676
bio->bi_opf &= ~REQ_OP_MASK;
block/blk-zoned.c
1677
bio->bi_opf |= REQ_OP_ZONE_APPEND;
block/blk-zoned.c
1678
bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND);
block/blk-zoned.c
1685
if (bio->bi_status != BLK_STS_OK) {
block/blk-zoned.c
1700
if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
block/blk-zoned.c
1736
struct bio *bio;
block/blk-zoned.c
1745
bio = bio_list_pop(&zwplug->bio_list);
block/blk-zoned.c
1746
if (!bio) {
block/blk-zoned.c
1753
bio->bi_iter.bi_sector, bio_sectors(bio));
block/blk-zoned.c
1755
prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
block/blk-zoned.c
1759
blk_zone_wplug_bio_io_error(zwplug, bio);
block/blk-zoned.c
1763
bdev = bio->bi_bdev;
block/blk-zoned.c
1772
bdev->bd_disk->fops->submit_bio(bio);
block/blk-zoned.c
1775
blk_mq_submit_bio(bio);
block/blk-zoned.c
270
struct bio bio;
block/blk-zoned.c
272
bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
block/blk-zoned.c
273
trace_blkdev_zone_mgmt(&bio, 0);
block/blk-zoned.c
274
return submit_bio_wait(&bio);
block/blk-zoned.c
298
struct bio *bio = NULL;
block/blk-zoned.c
329
bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
block/blk-zoned.c
330
bio->bi_iter.bi_sector = sector;
block/blk-zoned.c
337
trace_blkdev_zone_mgmt(bio, nr_sectors);
block/blk-zoned.c
338
ret = submit_bio_wait(bio);
block/blk-zoned.c
339
bio_put(bio);
block/blk-zoned.c
726
struct bio *bio)
block/blk-zoned.c
730
bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
block/blk-zoned.c
731
bio_io_error(bio);
block/blk-zoned.c
742
struct bio *bio;
block/blk-zoned.c
751
while ((bio = bio_list_pop(&zwplug->bio_list)))
block/blk-zoned.c
752
blk_zone_wplug_bio_io_error(zwplug, bio);
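
For zone management the blk-zoned.c fragments show both submission styles: a single on-stack bio for REQ_OP_ZONE_RESET_ALL, and a blk_next_bio() chain issuing one bio per zone otherwise. A sketch of the per-zone loop (end_sector and zone_sectors are illustrative locals):

	struct bio *bio = NULL;
	int ret;

	while (sector < end_sector) {
		/* chains and submits the previous bio, allocates the next */
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;
	}

	trace_blkdev_zone_mgmt(bio, nr_sectors);
	ret = submit_bio_wait(bio);	/* waits for the whole chain */
	bio_put(bio);
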
block/blk.h
230
void bio_integrity_free(struct bio *bio);
block/blk.h
238
bool __bio_integrity_endio(struct bio *bio);
block/blk.h
239
static inline bool bio_integrity_endio(struct bio *bio)
block/blk.h
241
struct bio_integrity_payload *bip = bio_integrity(bio);
block/blk.h
244
return __bio_integrity_endio(bio);
block/blk.h
251
struct bio *);
block/blk.h
254
struct bio *next)
block/blk.h
256
struct bio_integrity_payload *bip = bio_integrity(req->bio);
block/blk.h
265
struct bio *bio)
block/blk.h
267
struct bio_integrity_payload *bip = bio_integrity(bio);
block/blk.h
268
struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
block/blk.h
283
struct request *r, struct bio *b)
block/blk.h
288
struct bio *next)
block/blk.h
293
struct bio *bio)
block/blk.h
301
static inline bool bio_integrity_endio(struct bio *bio)
block/blk.h
305
static inline void bio_integrity_free(struct bio *bio)
block/blk.h
320
struct bio *bio, unsigned int nr_segs);
block/blk.h
321
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
block/blk.h
324
struct bio *bio, unsigned int nr_segs);
block/blk.h
358
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
block/blk.h
360
struct bio *bio_split_write_zeroes(struct bio *bio,
block/blk.h
362
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
block/blk.h
364
struct bio *bio_split_zone_append(struct bio *bio,
block/blk.h
375
static inline bool bio_may_need_split(struct bio *bio,
block/blk.h
383
if (!bio->bi_io_vec)
block/blk.h
386
bv = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
block/blk.h
387
if (bio->bi_iter.bi_size > bv->bv_len - bio->bi_iter.bi_bvec_done)
block/blk.h
405
static inline struct bio *__bio_split_to_limits(struct bio *bio,
block/blk.h
408
switch (bio_op(bio)) {
block/blk.h
411
if (bio_may_need_split(bio, lim))
block/blk.h
412
return bio_split_rw(bio, lim, nr_segs);
block/blk.h
414
return bio;
block/blk.h
416
return bio_split_zone_append(bio, lim, nr_segs);
block/blk.h
419
return bio_split_discard(bio, lim, nr_segs);
block/blk.h
421
return bio_split_write_zeroes(bio, lim, nr_segs);
block/blk.h
425
return bio;
block/blk.h
450
int ll_back_merge_fn(struct request *req, struct bio *bio,
block/blk.h
455
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
block/blk.h
456
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
block/blk.h
488
static inline bool bio_zone_write_plugging(struct bio *bio)
block/blk.h
490
return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
block/blk.h
493
struct bio *bio)
block/blk.h
496
bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
block/blk.h
498
void blk_zone_write_plug_bio_merged(struct bio *bio);
block/blk.h
500
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
block/blk.h
501
void blk_zone_mgmt_bio_endio(struct bio *bio);
block/blk.h
502
void blk_zone_write_plug_bio_endio(struct bio *bio);
block/blk.h
503
static inline void blk_zone_bio_endio(struct bio *bio)
block/blk.h
513
if (op_is_zone_mgmt(bio_op(bio))) {
block/blk.h
514
blk_zone_mgmt_bio_endio(bio);
block/blk.h
522
if (bio_zone_write_plugging(bio))
block/blk.h
523
blk_zone_write_plug_bio_endio(bio);
block/blk.h
543
static inline bool bio_zone_write_plugging(struct bio *bio)
block/blk.h
548
struct bio *bio)
block/blk.h
552
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
block/blk.h
559
struct bio *bio)
block/blk.h
56
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
block/blk.h
562
static inline void blk_zone_bio_endio(struct bio *bio)
block/blk.h
57
void submit_bio_noacct_nocheck(struct bio *bio, bool split);
block/blk.h
58
void bio_await_chain(struct bio *bio);
block/blk.h
631
int should_fail_bio(struct bio *bio);
block/blk.h
702
void blk_integrity_generate(struct bio *bio);
block/blk.h
703
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
block/blk.h
84
static inline int bio_queue_enter(struct bio *bio)
block/blk.h
86
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk.h
93
return __bio_queue_enter(q, bio);
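
blk.h stitches the split variants together in __bio_split_to_limits(), dispatching on the operation and skipping the expensive path when bio_may_need_split() says a single-bvec bio already fits. Reassembled from the quoted fragments (the *nr_segs fast-path assignment is an assumption):

static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* all other ops carry no splittable payload */
		return bio;
	}
}
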
block/bsg-lib.c
138
blk_rq_unmap_user(bio);
block/bsg-lib.c
239
if (req->bio) {
block/bsg-lib.c
33
struct bio *bio;
block/bsg-lib.c
76
job->bidi_bio = job->bidi_rq->bio;
block/bsg-lib.c
94
bio = rq->bio;
block/elevator.c
269
struct bio *bio)
block/elevator.c
280
if (blk_queue_nomerges(q) || !bio_mergeable(bio))
block/elevator.c
286
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
block/elevator.c
287
enum elv_merge ret = blk_try_merge(q->last_merge, bio);
block/elevator.c
301
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
block/elevator.c
302
if (__rq && elv_bio_merge_ok(__rq, bio)) {
block/elevator.c
311
return e->type->ops.request_merge(q, req, bio);
block/elevator.c
60
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
block/elevator.c
66
return e->type->ops.allow_merge(q, rq, bio);
block/elevator.c
74
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
block/elevator.c
76
if (!blk_rq_merge_ok(rq, bio))
block/elevator.c
79
if (!elv_iosched_allow_bio_merge(rq, bio))
block/elevator.h
164
struct bio *);
block/elevator.h
186
extern bool elv_bio_merge_ok(struct request *, struct bio *);
block/elevator.h
66
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
block/elevator.h
67
bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
block/elevator.h
68
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
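
The elevator entries reduce bio merging policy to two checks: generic mergeability (blk_rq_merge_ok()) and the active scheduler's per-request veto (ops.allow_merge). Reassembled from the fragments:

bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	/* let the io scheduler veto the merge */
	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
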
block/fops.c
100
bio_release_pages(&bio, should_dirty);
block/fops.c
101
if (unlikely(bio.bi_status))
block/fops.c
102
ret = blk_status_to_errno(bio.bi_status);
block/fops.c
108
bio_uninit(&bio);
block/fops.c
126
struct bio bio ____cacheline_aligned_in_smp;
block/fops.c
131
static void blkdev_bio_end_io(struct bio *bio)
block/fops.c
133
struct blkdev_dio *dio = bio->bi_private;
block/fops.c
137
if (bio->bi_status && !dio->bio.bi_status)
block/fops.c
138
dio->bio.bi_status = bio->bi_status;
block/fops.c
140
if (bio_integrity(bio))
block/fops.c
141
bio_integrity_unmap_user(bio);
block/fops.c
150
if (likely(!dio->bio.bi_status)) {
block/fops.c
154
ret = blk_status_to_errno(dio->bio.bi_status);
block/fops.c
158
bio_put(&dio->bio);
block/fops.c
168
bio_check_pages_dirty(bio);
block/fops.c
170
bio_release_pages(bio, false);
block/fops.c
171
bio_put(bio);
block/fops.c
180
struct bio *bio;
block/fops.c
186
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
block/fops.c
188
dio = container_of(bio, struct blkdev_dio, bio);
block/fops.c
194
bio_get(bio);
block/fops.c
212
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
block/fops.c
213
bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
block/fops.c
214
bio->bi_write_stream = iocb->ki_write_stream;
block/fops.c
215
bio->bi_private = dio;
block/fops.c
216
bio->bi_end_io = blkdev_bio_end_io;
block/fops.c
217
bio->bi_ioprio = iocb->ki_ioprio;
block/fops.c
219
ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
block/fops.c
221
bio->bi_status = BLK_STS_IOERR;
block/fops.c
222
bio_endio(bio);
block/fops.c
238
bio->bi_opf |= REQ_NOWAIT;
block/fops.c
241
ret = bio_integrity_map_iter(bio, iocb->private);
block/fops.c
248
bio_set_pages_dirty(bio);
block/fops.c
250
task_io_account_write(bio->bi_iter.bi_size);
block/fops.c
252
dio->size += bio->bi_iter.bi_size;
block/fops.c
253
pos += bio->bi_iter.bi_size;
block/fops.c
257
submit_bio(bio);
block/fops.c
261
submit_bio(bio);
block/fops.c
262
bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
block/fops.c
279
ret = blk_status_to_errno(dio->bio.bi_status);
block/fops.c
283
bio_put(&dio->bio);
block/fops.c
286
bio_release_pages(bio, false);
block/fops.c
287
bio_clear_flag(bio, BIO_REFFED);
block/fops.c
288
bio_put(bio);
block/fops.c
293
static void blkdev_bio_end_io_async(struct bio *bio)
block/fops.c
295
struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
block/fops.c
301
if (likely(!bio->bi_status)) {
block/fops.c
305
ret = blk_status_to_errno(bio->bi_status);
block/fops.c
308
if (bio_integrity(bio))
block/fops.c
309
bio_integrity_unmap_user(bio);
block/fops.c
314
bio_check_pages_dirty(bio);
block/fops.c
316
bio_release_pages(bio, false);
block/fops.c
317
bio_put(bio);
block/fops.c
329
struct bio *bio;
block/fops.c
333
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
block/fops.c
335
dio = container_of(bio, struct blkdev_dio, bio);
block/fops.c
338
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
block/fops.c
339
bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
block/fops.c
340
bio->bi_write_stream = iocb->ki_write_stream;
block/fops.c
341
bio->bi_end_io = blkdev_bio_end_io_async;
block/fops.c
342
bio->bi_ioprio = iocb->ki_ioprio;
block/fops.c
351
bio_iov_bvec_set(bio, iter);
block/fops.c
353
ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
block/fops.c
357
dio->size = bio->bi_iter.bi_size;
block/fops.c
362
bio_set_pages_dirty(bio);
block/fops.c
365
task_io_account_write(bio->bi_iter.bi_size);
block/fops.c
369
ret = bio_integrity_map_iter(bio, iocb->private);
block/fops.c
376
bio->bi_opf |= REQ_ATOMIC;
block/fops.c
379
bio->bi_opf |= REQ_NOWAIT;
block/fops.c
382
bio->bi_opf |= REQ_POLLED;
block/fops.c
383
submit_bio(bio);
block/fops.c
384
WRITE_ONCE(iocb->private, bio);
block/fops.c
386
submit_bio(bio);
block/fops.c
391
bio_put(bio);
block/fops.c
46
static inline int blkdev_iov_iter_get_pages(struct bio *bio,
block/fops.c
49
return bio_iov_iter_get_pages(bio, iter,
block/fops.c
62
struct bio bio;
block/fops.c
74
bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
block/fops.c
78
bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
block/fops.c
80
bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
block/fops.c
81
bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
block/fops.c
82
bio.bi_write_stream = iocb->ki_write_stream;
block/fops.c
83
bio.bi_ioprio = iocb->ki_ioprio;
block/fops.c
85
bio.bi_opf |= REQ_ATOMIC;
block/fops.c
87
ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
block/fops.c
90
ret = bio.bi_iter.bi_size;
block/fops.c
96
bio.bi_opf |= REQ_NOWAIT;
block/fops.c
975
offsetof(struct blkdev_dio, bio),
block/fops.c
98
submit_bio_wait(&bio);
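
The fops.c fragments cover three direct-I/O strategies: a fully synchronous one with an on-stack bio, a multi-bio variant with a refcounted blkdev_dio, and an async single-bio fast path. A sketch of the synchronous path, reassembled from the quoted lines (label placement is an assumption):

	struct bio bio;
	ssize_t ret;

	bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);
out:
	bio_uninit(&bio);
	return ret;
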
block/ioctl.c
125
struct bio *prev = NULL, *bio;
block/ioctl.c
163
bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
block/ioctl.c
165
if (!bio)
block/ioctl.c
167
prev = bio_chain_and_submit(prev, bio);
block/ioctl.c
883
static void bio_cmd_bio_end_io(struct bio *bio)
block/ioctl.c
885
struct io_uring_cmd *cmd = bio->bi_private;
block/ioctl.c
888
if (unlikely(bio->bi_status) && !bic->res)
block/ioctl.c
889
bic->res = blk_status_to_errno(bio->bi_status);
block/ioctl.c
892
bio_put(bio);
block/ioctl.c
903
struct bio *prev = NULL, *bio;
block/ioctl.c
922
bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp);
block/ioctl.c
923
if (!bio)
block/ioctl.c
933
bio_put(bio);
block/ioctl.c
936
bio->bi_opf |= REQ_NOWAIT;
block/ioctl.c
939
prev = bio_chain_and_submit(prev, bio);
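
The ioctl discard paths open-code the same chain-and-submit loop as blk-lib.c; the synchronous BLKDISCARD variant waits on the last bio, while the io_uring variant marks each bio REQ_NOWAIT and completes through bio_cmd_bio_end_io(). A sketch of the synchronous loop (the final wait is reassembled from the fragments and partly assumed):

	struct bio *prev = NULL, *bio;
	int err = 0;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
					    GFP_KERNEL)))
		prev = bio_chain_and_submit(prev, bio);

	if (prev) {
		err = submit_bio_wait(prev);	/* whole chain */
		bio_put(prev);
	}
	return err;
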
block/kyber-iosched.c
553
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
block/kyber-iosched.c
557
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx);
block/kyber-iosched.c
560
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
block/kyber-iosched.c
565
merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
block/mq-deadline.c
572
struct bio *bio)
block/mq-deadline.c
575
const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
block/mq-deadline.c
578
sector_t sector = bio_end_sector(bio);
block/mq-deadline.c
584
__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
block/mq-deadline.c
588
if (elv_bio_merge_ok(__rq, bio)) {
block/mq-deadline.c
603
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
block/mq-deadline.c
611
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
block/t10-pi.c
131
struct bio *bio;
block/t10-pi.c
133
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
134
struct bio_integrity_payload *bip = bio_integrity(bio);
block/t10-pi.c
183
struct bio *bio;
block/t10-pi.c
185
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
186
struct bio_integrity_payload *bip = bio_integrity(bio);
block/t10-pi.c
305
struct bio *bio;
block/t10-pi.c
307
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
308
struct bio_integrity_payload *bip = bio_integrity(bio);
block/t10-pi.c
346
struct bio *bio;
block/t10-pi.c
348
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
349
struct bio_integrity_payload *bip = bio_integrity(bio);
block/t10-pi.c
375
void blk_integrity_generate(struct bio *bio)
block/t10-pi.c
377
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/t10-pi.c
378
struct bio_integrity_payload *bip = bio_integrity(bio);
block/t10-pi.c
383
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
block/t10-pi.c
385
iter.seed = bio->bi_iter.bi_sector;
block/t10-pi.c
387
bio_for_each_segment(bv, bio, bviter) {
block/t10-pi.c
407
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
block/t10-pi.c
409
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
block/t10-pi.c
410
struct bio_integrity_payload *bip = bio_integrity(bio);
block/t10-pi.c
419
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
block/t10-pi.c
423
__bio_for_each_segment(bv, bio, bviter, *saved_iter) {
block/t10-pi.c
443
bio->bi_status = ret;
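
All four t10-pi remap helpers share one traversal shape: walk every bio hanging off the request, grab its integrity payload, and iterate the protection-information buffer. A sketch of that shape (bip_for_each_vec() as the payload iterator is an assumption):

	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			/* remap or check the protection tuples in iv */
		}
	}
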
drivers/block/amiflop.c
1475
data = bio_data(rq->bio) + 512 * cnt;
drivers/block/aoe/aoe.h
110
struct bio *bio;
drivers/block/aoe/aoe.h
184
struct bio *nxbio;
drivers/block/aoe/aoecmd.c
1025
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
drivers/block/aoe/aoecmd.c
1032
__bio_for_each_segment(bv, bio, iter, iter) {
drivers/block/aoe/aoecmd.c
1043
struct bio *bio;
drivers/block/aoe/aoecmd.c
1052
bio = rq->bio;
drivers/block/aoe/aoecmd.c
1053
bok = !fastfail && !bio->bi_status;
drivers/block/aoe/aoecmd.c
1056
} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
drivers/block/aoe/aoecmd.c
1115
buf->bio->bi_status = BLK_STS_IOERR;
drivers/block/aoe/aoecmd.c
1128
buf->bio->bi_status = BLK_STS_IOERR;
drivers/block/aoe/aoecmd.c
1136
buf->bio->bi_status = BLK_STS_IOERR;
drivers/block/aoe/aoecmd.c
1139
bvcpy(skb, f->buf->bio, f->iter, n);
drivers/block/aoe/aoecmd.c
1640
buf->bio->bi_status = BLK_STS_IOERR;
drivers/block/aoe/aoecmd.c
296
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
drivers/block/aoe/aoecmd.c
301
__bio_for_each_segment(bv, bio, iter, iter)
drivers/block/aoe/aoecmd.c
351
if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
drivers/block/aoe/aoecmd.c
352
skb_fillup(skb, f->buf->bio, f->iter);
drivers/block/aoe/aoecmd.c
389
bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
drivers/block/aoe/aoecmd.c
834
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
drivers/block/aoe/aoecmd.c
838
buf->bio = bio;
drivers/block/aoe/aoecmd.c
839
buf->iter = bio->bi_iter;
drivers/block/aoe/aoecmd.c
849
struct bio *bio;
drivers/block/aoe/aoecmd.c
865
d->ip.nxbio = rq->bio;
drivers/block/aoe/aoecmd.c
869
__rq_for_each_bio(bio, rq)
drivers/block/aoe/aoecmd.c
877
bio = d->ip.nxbio;
drivers/block/aoe/aoecmd.c
878
bufinit(buf, rq, bio);
drivers/block/aoe/aoecmd.c
879
bio = bio->bi_next;
drivers/block/aoe/aoecmd.c
880
d->ip.nxbio = bio;
drivers/block/aoe/aoecmd.c
881
if (bio == NULL)
drivers/block/aoe/aoedev.c
164
struct bio *bio;
drivers/block/aoe/aoedev.c
172
while ((bio = d->ip.nxbio)) {
drivers/block/aoe/aoedev.c
173
bio->bi_status = BLK_STS_IOERR;
drivers/block/aoe/aoedev.c
174
d->ip.nxbio = bio->bi_next;
drivers/block/ataflop.c
1574
ReqBuffer = bio_data(fd_request->bio);
drivers/block/brd.c
138
static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
drivers/block/brd.c
140
struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
drivers/block/brd.c
141
sector_t sector = bio->bi_iter.bi_sector;
drivers/block/brd.c
143
blk_opf_t opf = bio->bi_opf;
drivers/block/brd.c
167
bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
drivers/block/brd.c
174
bio_wouldblock_error(bio);
drivers/block/brd.c
176
bio_io_error(bio);
drivers/block/brd.c
202
static void brd_submit_bio(struct bio *bio)
drivers/block/brd.c
204
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
drivers/block/brd.c
206
if (unlikely(op_is_discard(bio->bi_opf))) {
drivers/block/brd.c
207
brd_do_discard(brd, bio->bi_iter.bi_sector,
drivers/block/brd.c
208
bio->bi_iter.bi_size);
drivers/block/brd.c
209
bio_endio(bio);
drivers/block/brd.c
214
if (!brd_rw_bvec(brd, bio))
drivers/block/brd.c
216
} while (bio->bi_iter.bi_size);
drivers/block/brd.c
218
bio_endio(bio);
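
The brd entries show the bio-based ->submit_bio() loop: fetch the device from bio->bi_bdev, handle the bvec under the iterator with bio_iter_iovec(), consume it with bio_advance_iter_single(), and end the bio once bi_size reaches zero. A sketch under the assumption of a made-up demo_device backing store and demo_rw_bvec() helper; the iterator calls are the real API:

/*
 * Sketch of the ->submit_bio() loop seen in brd above. demo_device and
 * demo_rw_bvec() are hypothetical placeholders.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

struct demo_device;     /* hypothetical backing store */

static bool demo_rw_bvec(struct demo_device *dev, struct bio_vec *bv,
                         sector_t sector, bool is_write)
{
        return true;    /* hypothetical: copy bv->bv_len bytes to/from dev */
}

static void demo_submit_bio(struct bio *bio)
{
        struct demo_device *dev = bio->bi_bdev->bd_disk->private_data;

        do {
                struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);

                if (!demo_rw_bvec(dev, &bv, bio->bi_iter.bi_sector,
                                  bio_data_dir(bio) == WRITE)) {
                        bio_io_error(bio);      /* BLK_STS_IOERR + endio */
                        return;
                }
                /* consume the bytes just handled */
                bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
        } while (bio->bi_iter.bi_size);

        bio_endio(bio);
}
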
drivers/block/drbd/drbd_actlog.c
129
struct bio *bio;
drivers/block/drbd/drbd_actlog.c
142
bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
drivers/block/drbd/drbd_actlog.c
144
bio->bi_iter.bi_sector = sector;
drivers/block/drbd/drbd_actlog.c
146
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
drivers/block/drbd/drbd_actlog.c
148
bio->bi_private = device;
drivers/block/drbd/drbd_actlog.c
149
bio->bi_end_io = drbd_md_endio;
drivers/block/drbd/drbd_actlog.c
161
bio_get(bio); /* one bio_put() is in the completion handler */
drivers/block/drbd/drbd_actlog.c
165
bio_io_error(bio);
drivers/block/drbd/drbd_actlog.c
167
submit_bio(bio);
drivers/block/drbd/drbd_actlog.c
169
if (!bio->bi_status)
drivers/block/drbd/drbd_actlog.c
173
bio_put(bio);
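
The drbd_actlog entries above are the canonical asynchronous metadata write: allocate a one-vector bio, set bi_sector, add the page, stash a context in bi_private, point bi_end_io at a completion handler, submit, and wait. A self-contained sketch of that shape (a simplification of drbd's md_io path, not its implementation; 'size' is assumed to be at most one page):

/*
 * Alloc/setup/submit/endio sketch: the end_io callback reports status
 * through a private context and drops the bio reference.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

struct demo_md_ctx {
        struct completion done;
        blk_status_t status;
};

static void demo_md_endio(struct bio *bio)
{
        struct demo_md_ctx *ctx = bio->bi_private;

        ctx->status = bio->bi_status;
        complete(&ctx->done);
        bio_put(bio);
}

static int demo_md_write(struct block_device *bdev, struct page *page,
                         sector_t sector, unsigned int size)
{
        struct demo_md_ctx ctx;
        struct bio *bio;

        init_completion(&ctx.done);
        bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC | REQ_META,
                        GFP_NOIO);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, size, 0);     /* size <= PAGE_SIZE assumed */
        bio->bi_private = &ctx;
        bio->bi_end_io = demo_md_endio;

        submit_bio(bio);
        wait_for_completion(&ctx.done);
        return blk_status_to_errno(ctx.status);
}
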
drivers/block/drbd/drbd_bitmap.c
1043
bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
drivers/block/drbd/drbd_bitmap.c
1045
bio->bi_iter.bi_sector = on_disk_sector;
drivers/block/drbd/drbd_bitmap.c
1046
__bio_add_page(bio, page, len, 0);
drivers/block/drbd/drbd_bitmap.c
1047
bio->bi_private = ctx;
drivers/block/drbd/drbd_bitmap.c
1048
bio->bi_end_io = drbd_bm_endio;
drivers/block/drbd/drbd_bitmap.c
1051
bio_io_error(bio);
drivers/block/drbd/drbd_bitmap.c
1053
submit_bio(bio);
drivers/block/drbd/drbd_bitmap.c
937
static void drbd_bm_endio(struct bio *bio)
drivers/block/drbd/drbd_bitmap.c
939
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
drivers/block/drbd/drbd_bitmap.c
942
unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
drivers/block/drbd/drbd_bitmap.c
948
if (bio->bi_status) {
drivers/block/drbd/drbd_bitmap.c
951
ctx->error = blk_status_to_errno(bio->bi_status);
drivers/block/drbd/drbd_bitmap.c
957
bio->bi_status, idx);
drivers/block/drbd/drbd_bitmap.c
966
mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);
drivers/block/drbd/drbd_bitmap.c
968
bio_put(bio);
drivers/block/drbd/drbd_bitmap.c
995
struct bio *bio;
drivers/block/drbd/drbd_int.h
1371
extern void __drbd_make_request(struct drbd_device *, struct bio *);
drivers/block/drbd/drbd_int.h
1372
void drbd_submit_bio(struct bio *bio);
drivers/block/drbd/drbd_int.h
1406
extern void drbd_md_endio(struct bio *bio);
drivers/block/drbd/drbd_int.h
1407
extern void drbd_peer_request_endio(struct bio *bio);
drivers/block/drbd/drbd_int.h
1408
extern void drbd_request_endio(struct bio *bio);
drivers/block/drbd/drbd_int.h
1440
extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
drivers/block/drbd/drbd_int.h
1489
int fault_type, struct bio *bio)
drivers/block/drbd/drbd_int.h
1492
if (!bio->bi_bdev) {
drivers/block/drbd/drbd_int.h
1494
bio->bi_status = BLK_STS_IOERR;
drivers/block/drbd/drbd_int.h
1495
bio_endio(bio);
drivers/block/drbd/drbd_int.h
1500
bio_io_error(bio);
drivers/block/drbd/drbd_int.h
1502
submit_bio_noacct(bio);
drivers/block/drbd/drbd_int.h
209
struct bio *private_bio;
drivers/block/drbd/drbd_int.h
224
struct bio *master_bio; /* master bio pointer */
drivers/block/drbd/drbd_main.c
1568
static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
drivers/block/drbd/drbd_main.c
1574
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_main.c
1587
static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
drivers/block/drbd/drbd_main.c
1593
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_main.c
1632
struct bio *bio)
drivers/block/drbd/drbd_main.c
1635
return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
drivers/block/drbd/drbd_main.c
1636
(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
drivers/block/drbd/drbd_main.c
1637
(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
drivers/block/drbd/drbd_main.c
1638
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
drivers/block/drbd/drbd_main.c
1639
(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
drivers/block/drbd/drbd_main.c
1641
(DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
drivers/block/drbd/drbd_main.c
1645
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
drivers/block/drbd/drbd_main.c
2227
struct bio *bio = req->master_bio;
drivers/block/drbd/drbd_main.c
2262
__drbd_make_request(device, bio);
drivers/block/drbd/drbd_receiver.c
1073
static void one_flush_endio(struct bio *bio)
drivers/block/drbd/drbd_receiver.c
1075
struct one_flush_context *octx = bio->bi_private;
drivers/block/drbd/drbd_receiver.c
1079
if (bio->bi_status) {
drivers/block/drbd/drbd_receiver.c
1080
ctx->error = blk_status_to_errno(bio->bi_status);
drivers/block/drbd/drbd_receiver.c
1081
drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
drivers/block/drbd/drbd_receiver.c
1084
bio_put(bio);
drivers/block/drbd/drbd_receiver.c
1096
struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
drivers/block/drbd/drbd_receiver.c
1105
bio_put(bio);
drivers/block/drbd/drbd_receiver.c
1115
bio->bi_private = octx;
drivers/block/drbd/drbd_receiver.c
1116
bio->bi_end_io = one_flush_endio;
drivers/block/drbd/drbd_receiver.c
1121
submit_bio(bio);
drivers/block/drbd/drbd_receiver.c
1449
struct bio *bios = NULL;
drivers/block/drbd/drbd_receiver.c
1450
struct bio *bio;
drivers/block/drbd/drbd_receiver.c
1504
bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO);
drivers/block/drbd/drbd_receiver.c
1506
bio->bi_iter.bi_sector = sector;
drivers/block/drbd/drbd_receiver.c
1507
bio->bi_private = peer_req;
drivers/block/drbd/drbd_receiver.c
1508
bio->bi_end_io = drbd_peer_request_endio;
drivers/block/drbd/drbd_receiver.c
1510
bio->bi_next = bios;
drivers/block/drbd/drbd_receiver.c
1511
bios = bio;
drivers/block/drbd/drbd_receiver.c
1516
if (!bio_add_page(bio, page, len, 0))
drivers/block/drbd/drbd_receiver.c
1530
bio = bios;
drivers/block/drbd/drbd_receiver.c
1532
bio->bi_next = NULL;
drivers/block/drbd/drbd_receiver.c
1534
drbd_submit_bio_noacct(device, peer_request_fault_type(peer_req), bio);
drivers/block/drbd/drbd_receiver.c
1799
struct bio *bio;
drivers/block/drbd/drbd_receiver.c
1817
bio = req->master_bio;
drivers/block/drbd/drbd_receiver.c
1818
D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
drivers/block/drbd/drbd_receiver.c
1820
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_receiver.c
1831
drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
drivers/block/drbd/drbd_receiver.c
2250
if (m.bio)
drivers/block/drbd/drbd_receiver.c
5488
if (m.bio)
drivers/block/drbd/drbd_req.c
1155
struct bio *bio = req->private_bio;
drivers/block/drbd/drbd_req.c
1158
if (bio_op(bio) != REQ_OP_READ)
drivers/block/drbd/drbd_req.c
1160
else if (bio->bi_opf & REQ_RAHEAD)
drivers/block/drbd/drbd_req.c
1172
bio_io_error(bio);
drivers/block/drbd/drbd_req.c
1173
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
drivers/block/drbd/drbd_req.c
1175
((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM));
drivers/block/drbd/drbd_req.c
1176
else if (bio_op(bio) == REQ_OP_DISCARD)
drivers/block/drbd/drbd_req.c
1179
submit_bio_noacct(bio);
drivers/block/drbd/drbd_req.c
1182
bio_io_error(bio);
drivers/block/drbd/drbd_req.c
1203
drbd_request_prepare(struct drbd_device *device, struct bio *bio)
drivers/block/drbd/drbd_req.c
1205
const int rw = bio_data_dir(bio);
drivers/block/drbd/drbd_req.c
1209
req = drbd_req_new(device, bio);
drivers/block/drbd/drbd_req.c
1215
bio->bi_status = BLK_STS_RESOURCE;
drivers/block/drbd/drbd_req.c
1216
bio_endio(bio);
drivers/block/drbd/drbd_req.c
1225
bio, GFP_NOIO,
drivers/block/drbd/drbd_req.c
1232
if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
drivers/block/drbd/drbd_req.c
1233
bio_op(bio) == REQ_OP_DISCARD)
drivers/block/drbd/drbd_req.c
1431
if (m.bio)
drivers/block/drbd/drbd_req.c
1435
void __drbd_make_request(struct drbd_device *device, struct bio *bio)
drivers/block/drbd/drbd_req.c
1437
struct drbd_request *req = drbd_request_prepare(device, bio);
drivers/block/drbd/drbd_req.c
1612
void drbd_submit_bio(struct bio *bio)
drivers/block/drbd/drbd_req.c
1614
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
drivers/block/drbd/drbd_req.c
1616
bio = bio_split_to_limits(bio);
drivers/block/drbd/drbd_req.c
1617
if (!bio)
drivers/block/drbd/drbd_req.c
1623
D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
drivers/block/drbd/drbd_req.c
1626
__drbd_make_request(device, bio);
drivers/block/drbd/drbd_req.c
180
m->bio->bi_status = errno_to_blk_status(m->error);
drivers/block/drbd/drbd_req.c
181
bio_endio(m->bio);
drivers/block/drbd/drbd_req.c
24
static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
drivers/block/drbd/drbd_req.c
272
m->bio = req->master_bio;
drivers/block/drbd/drbd_req.c
570
m->bio = NULL;
drivers/block/drbd/drbd_req.h
263
struct bio *bio;
drivers/block/drbd/drbd_req.h
293
if (m.bio)
drivers/block/drbd/drbd_req.h
316
if (m.bio)
drivers/block/drbd/drbd_worker.c
171
void drbd_peer_request_endio(struct bio *bio)
drivers/block/drbd/drbd_worker.c
173
struct drbd_peer_request *peer_req = bio->bi_private;
drivers/block/drbd/drbd_worker.c
175
bool is_write = bio_data_dir(bio) == WRITE;
drivers/block/drbd/drbd_worker.c
176
bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
drivers/block/drbd/drbd_worker.c
177
bio_op(bio) == REQ_OP_DISCARD;
drivers/block/drbd/drbd_worker.c
179
if (bio->bi_status && drbd_ratelimit())
drivers/block/drbd/drbd_worker.c
182
: "read", bio->bi_status,
drivers/block/drbd/drbd_worker.c
185
if (bio->bi_status)
drivers/block/drbd/drbd_worker.c
188
bio_put(bio); /* no need for the bio anymore */
drivers/block/drbd/drbd_worker.c
206
void drbd_request_endio(struct bio *bio)
drivers/block/drbd/drbd_worker.c
209
struct drbd_request *req = bio->bi_private;
drivers/block/drbd/drbd_worker.c
246
if (!bio->bi_status)
drivers/block/drbd/drbd_worker.c
251
if (unlikely(bio->bi_status)) {
drivers/block/drbd/drbd_worker.c
252
switch (bio_op(bio)) {
drivers/block/drbd/drbd_worker.c
255
if (bio->bi_status == BLK_STS_NOTSUPP)
drivers/block/drbd/drbd_worker.c
261
if (bio->bi_opf & REQ_RAHEAD)
drivers/block/drbd/drbd_worker.c
274
req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
drivers/block/drbd/drbd_worker.c
275
bio_put(bio);
drivers/block/drbd/drbd_worker.c
283
if (m.bio)
drivers/block/drbd/drbd_worker.c
316
void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
drivers/block/drbd/drbd_worker.c
326
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_worker.c
50
void drbd_md_endio(struct bio *bio)
drivers/block/drbd/drbd_worker.c
54
device = bio->bi_private;
drivers/block/drbd/drbd_worker.c
55
device->md_io.error = blk_status_to_errno(bio->bi_status);
drivers/block/drbd/drbd_worker.c
60
bio_put(bio);
drivers/block/floppy.c
4121
static void floppy_rb0_cb(struct bio *bio)
drivers/block/floppy.c
4123
struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
drivers/block/floppy.c
4126
if (bio->bi_status) {
drivers/block/floppy.c
4128
bio->bi_status);
drivers/block/floppy.c
4136
struct bio bio;
drivers/block/floppy.c
4149
bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
drivers/block/floppy.c
4150
__bio_add_page(&bio, page, block_size(bdev), 0);
drivers/block/floppy.c
4152
bio.bi_iter.bi_sector = 0;
drivers/block/floppy.c
4153
bio.bi_flags |= (1 << BIO_QUIET);
drivers/block/floppy.c
4154
bio.bi_private = &cbdata;
drivers/block/floppy.c
4155
bio.bi_end_io = floppy_rb0_cb;
drivers/block/floppy.c
4159
submit_bio(&bio);
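
floppy's rb0 probe above builds a bio on the stack with bio_init() and completes through a private callback. Where the caller can simply sleep, the same on-stack pattern collapses to submit_bio_wait(), as in this hedged variant:

/*
 * On-stack, single-vector synchronous read; a simplification of the
 * floppy probe above, using submit_bio_wait() instead of a callback.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static int demo_read_sector0(struct block_device *bdev, struct page *page,
                             unsigned int len)
{
        struct bio_vec bvec;
        struct bio bio;
        int err;

        bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = 0;
        __bio_add_page(&bio, page, len, 0);

        err = submit_bio_wait(&bio);    /* returns a negative errno */
        bio_uninit(&bio);
        return err;
}
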
drivers/block/loop.c
1878
if (rq->bio) {
drivers/block/loop.c
1879
cmd->blkcg_css = bio_blkcg_css(rq->bio);
drivers/block/loop.c
305
struct bio *bio = rq->bio;
drivers/block/loop.c
307
while (bio) {
drivers/block/loop.c
308
zero_fill_bio(bio);
drivers/block/loop.c
309
bio = bio->bi_next;
drivers/block/loop.c
347
struct bio *bio = rq->bio;
drivers/block/loop.c
356
if (rq->bio != rq->biotail) {
drivers/block/loop.c
381
offset = bio->bi_iter.bi_bvec_done;
drivers/block/loop.c
382
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
drivers/block/n64cart.c
102
bio_endio(bio);
drivers/block/n64cart.c
87
static void n64cart_submit_bio(struct bio *bio)
drivers/block/n64cart.c
91
struct device *dev = bio->bi_bdev->bd_disk->private_data;
drivers/block/n64cart.c
92
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
drivers/block/n64cart.c
94
bio_for_each_segment(bvec, bio, iter) {
drivers/block/n64cart.c
96
bio_io_error(bio);
drivers/block/nbd.c
655
struct bio *bio;
drivers/block/nbd.c
740
bio = req->bio;
drivers/block/nbd.c
741
while (bio) {
drivers/block/nbd.c
742
struct bio *next = bio->bi_next;
drivers/block/nbd.c
746
bio_for_each_segment(bvec, bio, iter) {
drivers/block/nbd.c
781
bio = next;
drivers/block/null_blk/main.c
1387
struct bio *bio;
drivers/block/null_blk/main.c
1390
__rq_for_each_bio(bio, rq)
drivers/block/null_blk/main.c
1391
zero_fill_bio(bio);
drivers/block/ps3disk.c
89
__func__, __LINE__, bio_sectors(iter.bio),
drivers/block/ps3disk.c
90
iter.bio->bi_iter.bi_sector);
drivers/block/ps3vram.c
528
static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
drivers/block/ps3vram.c
529
struct bio *bio)
drivers/block/ps3vram.c
532
int write = bio_data_dir(bio) == WRITE;
drivers/block/ps3vram.c
534
loff_t offset = bio->bi_iter.bi_sector << 9;
drivers/block/ps3vram.c
538
struct bio *next;
drivers/block/ps3vram.c
540
bio_for_each_segment(bvec, bio, iter) {
drivers/block/ps3vram.c
574
bio->bi_status = error;
drivers/block/ps3vram.c
575
bio_endio(bio);
drivers/block/ps3vram.c
579
static void ps3vram_submit_bio(struct bio *bio)
drivers/block/ps3vram.c
581
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
drivers/block/ps3vram.c
589
bio_list_add(&priv->list, bio);
drivers/block/ps3vram.c
596
bio = ps3vram_do_bio(dev, bio);
drivers/block/ps3vram.c
597
} while (bio);
drivers/block/rbd.c
2668
u64 off, u64 len, struct bio *bio)
drivers/block/rbd.c
2671
struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
drivers/block/rbd.c
4768
rq->bio);
drivers/block/rnbd/rnbd-srv.c
105
static void rnbd_dev_bi_end_io(struct bio *bio)
drivers/block/rnbd/rnbd-srv.c
107
struct rnbd_io_private *rnbd_priv = bio->bi_private;
drivers/block/rnbd/rnbd-srv.c
111
rtrs_srv_resp_rdma(rnbd_priv->id, blk_status_to_errno(bio->bi_status));
drivers/block/rnbd/rnbd-srv.c
114
bio_put(bio);
drivers/block/rnbd/rnbd-srv.c
126
struct bio *bio;
drivers/block/rnbd/rnbd-srv.c
148
bio = bio_alloc(file_bdev(sess_dev->bdev_file), !!datalen,
drivers/block/rnbd/rnbd-srv.c
150
if (unlikely(!bio)) {
drivers/block/rnbd/rnbd-srv.c
159
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
drivers/block/rnbd/rnbd-srv.c
161
bio_add_virt_nofail(bio, data, datalen);
drivers/block/rnbd/rnbd-srv.c
162
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
drivers/block/rnbd/rnbd-srv.c
163
if (bio->bi_iter.bi_size != le32_to_cpu(msg->bi_size)) {
drivers/block/rnbd/rnbd-srv.c
166
bio->bi_iter.bi_size, msg->bi_size);
drivers/block/rnbd/rnbd-srv.c
172
bio->bi_end_io = rnbd_dev_bi_end_io;
drivers/block/rnbd/rnbd-srv.c
173
bio->bi_private = priv;
drivers/block/rnbd/rnbd-srv.c
174
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
drivers/block/rnbd/rnbd-srv.c
177
bio->bi_ioprio = prio;
drivers/block/rnbd/rnbd-srv.c
179
submit_bio(bio);
drivers/block/rnbd/rnbd-srv.c
184
bio_put(bio);
drivers/block/swim.c
547
bio_data(req->bio));
drivers/block/swim3.c
417
static phys_addr_t swim3_bio_phys(struct bio *bio)
drivers/block/swim3.c
419
return page_to_phys(bio_page(bio)) + bio_offset(bio);
drivers/block/swim3.c
465
init_dma(cp, OUTPUT_MORE, swim3_bio_phys(req->bio), 512);
drivers/block/swim3.c
470
init_dma(cp, INPUT_LAST, swim3_bio_phys(req->bio), n * 512);
drivers/block/ublk_drv.c
1139
return bio_has_data(rq->bio);
drivers/block/ublk_drv.c
1322
struct bio *bio = req->bio;
drivers/block/ublk_drv.c
1329
bio_for_each_integrity_vec(iv, bio, iter) {
drivers/block/virtio_blk.c
165
struct bio *bio;
drivers/block/virtio_blk.c
187
__rq_for_each_bio(bio, req) {
drivers/block/virtio_blk.c
188
u64 sector = bio->bi_iter.bi_sector;
drivers/block/virtio_blk.c
189
u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
drivers/block/xen-blkback/blkback.c
1068
static void end_block_io_op(struct bio *bio)
drivers/block/xen-blkback/blkback.c
1070
__end_block_io_op(bio->bi_private, bio->bi_status);
drivers/block/xen-blkback/blkback.c
1071
bio_put(bio);
drivers/block/xen-blkback/blkback.c
1296
struct bio *bio = NULL;
drivers/block/xen-blkback/blkback.c
1297
struct bio **biolist = pending_req->biolist;
drivers/block/xen-blkback/blkback.c
1428
while ((bio == NULL) ||
drivers/block/xen-blkback/blkback.c
1429
(bio_add_page(bio,
drivers/block/xen-blkback/blkback.c
1433
bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
drivers/block/xen-blkback/blkback.c
1436
biolist[nbio++] = bio;
drivers/block/xen-blkback/blkback.c
1437
bio->bi_private = pending_req;
drivers/block/xen-blkback/blkback.c
1438
bio->bi_end_io = end_block_io_op;
drivers/block/xen-blkback/blkback.c
1439
bio->bi_iter.bi_sector = preq.sector_number;
drivers/block/xen-blkback/blkback.c
1446
if (!bio) {
drivers/block/xen-blkback/blkback.c
1449
bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
drivers/block/xen-blkback/blkback.c
1451
biolist[nbio++] = bio;
drivers/block/xen-blkback/blkback.c
1452
bio->bi_private = pending_req;
drivers/block/xen-blkback/blkback.c
1453
bio->bi_end_io = end_block_io_op;
drivers/block/xen-blkback/common.h
356
struct bio *biolist[MAX_INDIRECT_SEGMENTS];
drivers/block/xen-blkfront.c
2012
struct bio *bio;
drivers/block/xen-blkfront.c
2048
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
drivers/block/xen-blkfront.c
2050
submit_bio(bio);
drivers/block/xen-blkfront.c
2099
merge_bio.head = shadow[j].request->bio;
drivers/block/xen-blkfront.c
2102
shadow[j].request->bio = NULL;
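
The xen-blkfront recovery entries drain a deferred bio_list and resubmit each entry; the whole requeue loop is just these two calls:

/*
 * Sketch of the bio_list drain seen in xen-blkfront above: pop queued
 * bios and hand them back to the block layer.
 */
#include <linux/bio.h>

static void demo_resubmit_all(struct bio_list *list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(list)) != NULL)
                submit_bio(bio);
}
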
drivers/block/z2ram.c
90
void *buffer = bio_data(req->bio);
drivers/block/zloop.c
492
if (rq->bio != rq->biotail) {
drivers/block/zloop.c
520
__bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
drivers/block/zloop.c
522
iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
drivers/block/zloop.c
633
struct bio *bio;
drivers/block/zloop.c
635
__rq_for_each_bio(bio, rq)
drivers/block/zloop.c
636
zero_fill_bio(bio);
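
loop, null_blk, and zloop all zero a request's payload the same way: walk every bio hanging off the request with __rq_for_each_bio() and call zero_fill_bio() on each:

/*
 * Request-level zero fill, as in the loop/null_blk/zloop entries above.
 */
#include <linux/bio.h>
#include <linux/blk-mq.h>

static void demo_zero_fill_request(struct request *rq)
{
        struct bio *bio;

        __rq_for_each_bio(bio, rq)
                zero_fill_bio(bio);
}
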
drivers/block/zram/zram_drv.c
1103
bio_init(&req->bio, zram->bdev, &req->bio_vec, 1, REQ_OP_WRITE);
drivers/block/zram/zram_drv.c
1104
req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
drivers/block/zram/zram_drv.c
1105
req->bio.bi_end_io = zram_writeback_endio;
drivers/block/zram/zram_drv.c
1106
__bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
drivers/block/zram/zram_drv.c
1380
struct page *page = bio_first_page_all(req->bio);
drivers/block/zram/zram_drv.c
1391
bio_put(req->bio);
drivers/block/zram/zram_drv.c
1395
static void zram_async_read_endio(struct bio *bio)
drivers/block/zram/zram_drv.c
1397
struct zram_rb_req *req = bio->bi_private;
drivers/block/zram/zram_drv.c
1400
if (bio->bi_status) {
drivers/block/zram/zram_drv.c
1401
req->parent->bi_status = bio->bi_status;
drivers/block/zram/zram_drv.c
1403
bio_put(bio);
drivers/block/zram/zram_drv.c
1419
bio_put(bio);
drivers/block/zram/zram_drv.c
1434
struct bio *parent)
drivers/block/zram/zram_drv.c
1437
struct bio *bio;
drivers/block/zram/zram_drv.c
1443
bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
drivers/block/zram/zram_drv.c
1444
if (!bio) {
drivers/block/zram/zram_drv.c
1452
req->bio = bio;
drivers/block/zram/zram_drv.c
1455
bio->bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
drivers/block/zram/zram_drv.c
1456
bio->bi_private = req;
drivers/block/zram/zram_drv.c
1457
bio->bi_end_io = zram_async_read_endio;
drivers/block/zram/zram_drv.c
1459
__bio_add_page(bio, page, PAGE_SIZE, 0);
drivers/block/zram/zram_drv.c
1461
submit_bio(bio);
drivers/block/zram/zram_drv.c
1468
struct bio bio;
drivers/block/zram/zram_drv.c
1470
bio_init(&bio, req->zram->bdev, &bv, 1, REQ_OP_READ);
drivers/block/zram/zram_drv.c
1471
bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
drivers/block/zram/zram_drv.c
1472
__bio_add_page(&bio, req->page, PAGE_SIZE, 0);
drivers/block/zram/zram_drv.c
1473
req->error = submit_bio_wait(&bio);
drivers/block/zram/zram_drv.c
1502
unsigned long blk_idx, struct bio *parent)
drivers/block/zram/zram_drv.c
1516
unsigned long blk_idx, struct bio *parent)
drivers/block/zram/zram_drv.c
2131
struct bio *parent)
drivers/block/zram/zram_drv.c
2178
u32 index, int offset, struct bio *bio)
drivers/block/zram/zram_drv.c
2182
return zram_read_page(zram, bvec->bv_page, index, bio);
drivers/block/zram/zram_drv.c
2308
u32 index, int offset, struct bio *bio)
drivers/block/zram/zram_drv.c
2316
ret = zram_read_page(zram, page, index, bio);
drivers/block/zram/zram_drv.c
2326
u32 index, int offset, struct bio *bio)
drivers/block/zram/zram_drv.c
2329
return zram_bvec_write_partial(zram, bvec, index, offset, bio);
drivers/block/zram/zram_drv.c
2681
static void zram_bio_discard(struct zram *zram, struct bio *bio)
drivers/block/zram/zram_drv.c
2683
size_t n = bio->bi_iter.bi_size;
drivers/block/zram/zram_drv.c
2684
u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
drivers/block/zram/zram_drv.c
2685
u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
drivers/block/zram/zram_drv.c
2715
bio_endio(bio);
drivers/block/zram/zram_drv.c
2718
static void zram_bio_read(struct zram *zram, struct bio *bio)
drivers/block/zram/zram_drv.c
2720
unsigned long start_time = bio_start_io_acct(bio);
drivers/block/zram/zram_drv.c
2721
struct bvec_iter iter = bio->bi_iter;
drivers/block/zram/zram_drv.c
2727
struct bio_vec bv = bio_iter_iovec(bio, iter);
drivers/block/zram/zram_drv.c
2731
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
drivers/block/zram/zram_drv.c
2733
bio->bi_status = BLK_STS_IOERR;
drivers/block/zram/zram_drv.c
2742
bio_advance_iter_single(bio, &iter, bv.bv_len);
drivers/block/zram/zram_drv.c
2745
bio_end_io_acct(bio, start_time);
drivers/block/zram/zram_drv.c
2746
bio_endio(bio);
drivers/block/zram/zram_drv.c
2749
static void zram_bio_write(struct zram *zram, struct bio *bio)
drivers/block/zram/zram_drv.c
2751
unsigned long start_time = bio_start_io_acct(bio);
drivers/block/zram/zram_drv.c
2752
struct bvec_iter iter = bio->bi_iter;
drivers/block/zram/zram_drv.c
2758
struct bio_vec bv = bio_iter_iovec(bio, iter);
drivers/block/zram/zram_drv.c
2762
if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
drivers/block/zram/zram_drv.c
2764
bio->bi_status = BLK_STS_IOERR;
drivers/block/zram/zram_drv.c
2772
bio_advance_iter_single(bio, &iter, bv.bv_len);
drivers/block/zram/zram_drv.c
2775
bio_end_io_acct(bio, start_time);
drivers/block/zram/zram_drv.c
2776
bio_endio(bio);
drivers/block/zram/zram_drv.c
2782
static void zram_submit_bio(struct bio *bio)
drivers/block/zram/zram_drv.c
2784
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
drivers/block/zram/zram_drv.c
2786
switch (bio_op(bio)) {
drivers/block/zram/zram_drv.c
2788
zram_bio_read(zram, bio);
drivers/block/zram/zram_drv.c
2791
zram_bio_write(zram, bio);
drivers/block/zram/zram_drv.c
2795
zram_bio_discard(zram, bio);
drivers/block/zram/zram_drv.c
2799
bio_endio(bio);
drivers/block/zram/zram_drv.c
514
struct bio bio;
drivers/block/zram/zram_drv.c
524
struct bio *bio;
drivers/block/zram/zram_drv.c
528
struct bio *parent;
drivers/block/zram/zram_drv.c
923
err = blk_status_to_errno(req->bio.bi_status);
drivers/block/zram/zram_drv.c
961
static void zram_writeback_endio(struct bio *bio)
drivers/block/zram/zram_drv.c
963
struct zram_wb_req *req = container_of(bio, struct zram_wb_req, bio);
drivers/block/zram/zram_drv.c
964
struct zram_wb_ctl *wb_ctl = bio->bi_private;
drivers/block/zram/zram_drv.c
984
req->bio.bi_private = wb_ctl;
drivers/block/zram/zram_drv.c
985
submit_bio(&req->bio);
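
zram_bio_discard() above converts the bio's sector range into whole-page slot indices, dropping any unaligned head and tail because only full pages can be freed. A sketch of that arithmetic, with a hypothetical demo_slot_free() in place of zram's slot bookkeeping:

/*
 * Sketch of zram-style discard trimming. Only whole PAGE_SIZE units
 * are discardable; partial head/tail bytes are silently skipped.
 * demo_slot_free() is hypothetical.
 */
#include <linux/bio.h>
#include <linux/mm.h>

#define DEMO_SECTORS_PER_PAGE_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
#define DEMO_SECTORS_PER_PAGE           (1 << DEMO_SECTORS_PER_PAGE_SHIFT)

static void demo_slot_free(u32 index)
{
        /* hypothetical: release the backing slot for this page index */
}

static void demo_bio_discard(struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        u32 index = bio->bi_iter.bi_sector >> DEMO_SECTORS_PER_PAGE_SHIFT;
        u32 offset = (bio->bi_iter.bi_sector & (DEMO_SECTORS_PER_PAGE - 1))
                        << SECTOR_SHIFT;

        /* round up past an unaligned head, if any */
        if (offset) {
                if (n <= PAGE_SIZE - offset)
                        goto out;
                n -= PAGE_SIZE - offset;
                index++;
        }
        while (n >= PAGE_SIZE) {
                demo_slot_free(index++);
                n -= PAGE_SIZE;
        }
out:
        bio_endio(bio);
}
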
drivers/cdrom/gdrom.c
585
__raw_writel(page_to_phys(bio_page(req->bio)) + bio_offset(req->bio),
drivers/md/bcache/bcache.h
281
struct bio *bio, unsigned int sectors);
drivers/md/bcache/bcache.h
310
struct bio sb_bio;
drivers/md/bcache/bcache.h
424
struct bio sb_bio;
drivers/md/bcache/bcache.h
755
struct bio bio;
drivers/md/bcache/bcache.h
761
struct bio *orig_bio;
drivers/md/bcache/bcache.h
762
struct bio bio;
drivers/md/bcache/bcache.h
945
struct bio *bio,
drivers/md/bcache/bcache.h
950
bio->bi_status = BLK_STS_IOERR;
drivers/md/bcache/bcache.h
951
bio_endio(bio);
drivers/md/bcache/bcache.h
954
submit_bio_noacct(bio);
drivers/md/bcache/bcache.h
973
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
drivers/md/bcache/bcache.h
976
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
drivers/md/bcache/bcache.h
978
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
drivers/md/bcache/bcache.h
980
void bch_bbio_free(struct bio *bio, struct cache_set *c);
drivers/md/bcache/bcache.h
981
struct bio *bch_bbio_alloc(struct cache_set *c);
drivers/md/bcache/bcache.h
983
void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
drivers/md/bcache/bcache.h
984
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
drivers/md/bcache/btree.c
236
static void btree_node_read_endio(struct bio *bio)
drivers/md/bcache/btree.c
238
struct closure *cl = bio->bi_private;
drivers/md/bcache/btree.c
247
struct bio *bio;
drivers/md/bcache/btree.c
253
bio = bch_bbio_alloc(b->c);
drivers/md/bcache/btree.c
254
bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
drivers/md/bcache/btree.c
255
bio->bi_end_io = btree_node_read_endio;
drivers/md/bcache/btree.c
256
bio->bi_private = &cl;
drivers/md/bcache/btree.c
257
bio->bi_opf = REQ_OP_READ | REQ_META;
drivers/md/bcache/btree.c
259
bch_bio_map(bio, b->keys.set[0].data);
drivers/md/bcache/btree.c
261
bch_submit_bbio(bio, b->c, &b->key, 0);
drivers/md/bcache/btree.c
264
if (bio->bi_status)
drivers/md/bcache/btree.c
267
bch_bbio_free(bio, b->c);
drivers/md/bcache/btree.c
308
bch_bbio_free(b->bio, b->c);
drivers/md/bcache/btree.c
309
b->bio = NULL;
drivers/md/bcache/btree.c
322
bio_free_pages(b->bio);
drivers/md/bcache/btree.c
326
static void btree_node_write_endio(struct bio *bio)
drivers/md/bcache/btree.c
328
struct closure *cl = bio->bi_private;
drivers/md/bcache/btree.c
331
if (bio->bi_status)
drivers/md/bcache/btree.c
334
bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
drivers/md/bcache/btree.c
347
BUG_ON(b->bio);
drivers/md/bcache/btree.c
348
b->bio = bch_bbio_alloc(b->c);
drivers/md/bcache/btree.c
350
b->bio->bi_end_io = btree_node_write_endio;
drivers/md/bcache/btree.c
351
b->bio->bi_private = cl;
drivers/md/bcache/btree.c
352
b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
drivers/md/bcache/btree.c
353
b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
drivers/md/bcache/btree.c
354
bch_bio_map(b->bio, i);
drivers/md/bcache/btree.c
375
if (!bch_bio_alloc_pages(b->bio, GFP_NOWAIT)) {
drivers/md/bcache/btree.c
380
bio_for_each_segment_all(bv, b->bio, iter_all) {
drivers/md/bcache/btree.c
385
bch_submit_bbio(b->bio, b->c, &k.key, 0);
drivers/md/bcache/btree.c
393
b->bio->bi_vcnt = 0;
drivers/md/bcache/btree.c
394
bch_bio_map(b->bio, i);
drivers/md/bcache/btree.c
396
bch_submit_bbio(b->bio, b->c, &k.key, 0);
drivers/md/bcache/btree.h
145
struct bio *bio;
drivers/md/bcache/debug.c
108
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
drivers/md/bcache/debug.c
110
unsigned int nr_segs = bio_segments(bio);
drivers/md/bcache/debug.c
111
struct bio *check;
drivers/md/bcache/debug.c
118
bio_init_inline(check, bio->bi_bdev, nr_segs, REQ_OP_READ);
drivers/md/bcache/debug.c
119
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
drivers/md/bcache/debug.c
120
check->bi_iter.bi_size = bio->bi_iter.bi_size;
drivers/md/bcache/debug.c
129
bio_for_each_segment(bv, bio, iter) {
drivers/md/bcache/debug.c
140
(uint64_t) bio->bi_iter.bi_sector);
drivers/md/bcache/debug.c
35
struct bio *bio;
drivers/md/bcache/debug.c
52
bio = bch_bbio_alloc(b->c);
drivers/md/bcache/debug.c
53
bio_set_dev(bio, b->c->cache->bdev);
drivers/md/bcache/debug.c
54
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
drivers/md/bcache/debug.c
55
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
drivers/md/bcache/debug.c
56
bio->bi_opf = REQ_OP_READ | REQ_META;
drivers/md/bcache/debug.c
57
bch_bio_map(bio, sorted);
drivers/md/bcache/debug.c
59
submit_bio_wait(bio);
drivers/md/bcache/debug.c
60
bch_bbio_free(bio, b->c);
drivers/md/bcache/debug.h
12
void bch_data_verify(struct cached_dev *dc, struct bio *bio);
drivers/md/bcache/debug.h
21
static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
drivers/md/bcache/debug.h
5
struct bio;
drivers/md/bcache/io.c
136
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
drivers/md/bcache/io.c
139
struct bbio *b = container_of(bio, struct bbio, bio);
drivers/md/bcache/io.c
141
int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
drivers/md/bcache/io.c
143
unsigned int threshold = op_is_write(bio_op(bio))
drivers/md/bcache/io.c
166
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
drivers/md/bcache/io.c
169
struct closure *cl = bio->bi_private;
drivers/md/bcache/io.c
17
void bch_bbio_free(struct bio *bio, struct cache_set *c)
drivers/md/bcache/io.c
171
bch_bbio_count_io_errors(c, bio, error, m);
drivers/md/bcache/io.c
172
bio_put(bio);
drivers/md/bcache/io.c
19
struct bbio *b = container_of(bio, struct bbio, bio);
drivers/md/bcache/io.c
24
struct bio *bch_bbio_alloc(struct cache_set *c)
drivers/md/bcache/io.c
27
struct bio *bio = &b->bio;
drivers/md/bcache/io.c
29
bio_init_inline(bio, NULL, meta_bucket_pages(&c->cache->sb), 0);
drivers/md/bcache/io.c
31
return bio;
drivers/md/bcache/io.c
34
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
drivers/md/bcache/io.c
36
struct bbio *b = container_of(bio, struct bbio, bio);
drivers/md/bcache/io.c
38
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
drivers/md/bcache/io.c
39
bio_set_dev(bio, c->cache->bdev);
drivers/md/bcache/io.c
42
closure_bio_submit(c, bio, bio->bi_private);
drivers/md/bcache/io.c
45
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
drivers/md/bcache/io.c
48
struct bbio *b = container_of(bio, struct bbio, bio);
drivers/md/bcache/io.c
51
__bch_submit_bbio(bio, c);
drivers/md/bcache/io.c
55
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
drivers/md/bcache/io.c
67
if (bio->bi_opf & REQ_RAHEAD) {
drivers/md/bcache/journal.c
28
static void journal_read_endio(struct bio *bio)
drivers/md/bcache/journal.c
30
struct closure *cl = bio->bi_private;
drivers/md/bcache/journal.c
39
struct bio *bio = &ja->bio;
drivers/md/bcache/journal.c
56
bio_reset(bio, ca->bdev, REQ_OP_READ);
drivers/md/bcache/journal.c
57
bio->bi_iter.bi_sector = bucket + offset;
drivers/md/bcache/journal.c
58
bio->bi_iter.bi_size = len << 9;
drivers/md/bcache/journal.c
60
bio->bi_end_io = journal_read_endio;
drivers/md/bcache/journal.c
61
bio->bi_private = &cl;
drivers/md/bcache/journal.c
62
bch_bio_map(bio, data);
drivers/md/bcache/journal.c
64
closure_bio_submit(ca->set, bio, &cl);
drivers/md/bcache/journal.c
641
static void journal_write_endio(struct bio *bio)
drivers/md/bcache/journal.c
643
struct journal_write *w = bio->bi_private;
drivers/md/bcache/journal.c
645
cache_set_err_on(bio->bi_status, w->c, "journal io error");
drivers/md/bcache/journal.c
681
struct bio *bio;
drivers/md/bcache/journal.c
713
bio = &ca->journal.bio;
drivers/md/bcache/journal.c
717
bio_reset(bio, ca->bdev, REQ_OP_WRITE |
drivers/md/bcache/journal.c
719
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
drivers/md/bcache/journal.c
720
bio->bi_iter.bi_size = sectors << 9;
drivers/md/bcache/journal.c
722
bio->bi_end_io = journal_write_endio;
drivers/md/bcache/journal.c
723
bio->bi_private = w;
drivers/md/bcache/journal.c
724
bch_bio_map(bio, w->data);
drivers/md/bcache/journal.c
726
trace_bcache_journal_write(bio, w->data->keys);
drivers/md/bcache/journal.c
727
bio_list_add(&list, bio);
drivers/md/bcache/journal.c
743
while ((bio = bio_list_pop(&list)))
drivers/md/bcache/journal.c
744
closure_bio_submit(c, bio, cl);
drivers/md/bcache/journal.h
143
struct bio bio;
drivers/md/bcache/movinggc.c
100
io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
drivers/md/bcache/movinggc.c
102
op->bio = &io->bio.bio;
drivers/md/bcache/movinggc.c
119
struct bio *bio = &io->bio.bio;
drivers/md/bcache/movinggc.c
121
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
drivers/md/bcache/movinggc.c
130
struct bio *bio;
drivers/md/bcache/movinggc.c
161
bio = &io->bio.bio;
drivers/md/bcache/movinggc.c
163
bio->bi_opf = REQ_OP_READ;
drivers/md/bcache/movinggc.c
164
bio->bi_end_io = read_moving_endio;
drivers/md/bcache/movinggc.c
166
if (bch_bio_alloc_pages(bio, GFP_KERNEL))
drivers/md/bcache/movinggc.c
19
struct bbio bio;
drivers/md/bcache/movinggc.c
48
struct bio *bio = &io->bio.bio;
drivers/md/bcache/movinggc.c
50
bio_free_pages(bio);
drivers/md/bcache/movinggc.c
62
static void read_moving_endio(struct bio *bio)
drivers/md/bcache/movinggc.c
64
struct bbio *b = container_of(bio, struct bbio, bio);
drivers/md/bcache/movinggc.c
65
struct moving_io *io = container_of(bio->bi_private,
drivers/md/bcache/movinggc.c
68
if (bio->bi_status)
drivers/md/bcache/movinggc.c
69
io->op.status = bio->bi_status;
drivers/md/bcache/movinggc.c
75
bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
drivers/md/bcache/movinggc.c
80
struct bio *bio = &io->bio.bio;
drivers/md/bcache/movinggc.c
82
bio_init_inline(bio, NULL,
drivers/md/bcache/movinggc.c
84
bio_get(bio);
drivers/md/bcache/movinggc.c
85
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
drivers/md/bcache/movinggc.c
87
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
drivers/md/bcache/movinggc.c
88
bio->bi_private = &io->cl;
drivers/md/bcache/movinggc.c
89
bch_bio_map(bio, NULL);
drivers/md/bcache/request.c
1007
if (bio_op(bio) == REQ_OP_DISCARD)
drivers/md/bcache/request.c
1018
s->iop.bio = s->orig_bio;
drivers/md/bcache/request.c
1019
bio_get(s->iop.bio);
drivers/md/bcache/request.c
1021
if (bio_op(bio) == REQ_OP_DISCARD &&
drivers/md/bcache/request.c
1026
bio->bi_end_io = backing_request_endio;
drivers/md/bcache/request.c
1027
closure_bio_submit(s->iop.c, bio, cl);
drivers/md/bcache/request.c
1031
s->iop.bio = bio;
drivers/md/bcache/request.c
1033
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/bcache/request.c
1038
struct bio *flush;
drivers/md/bcache/request.c
1040
flush = bio_alloc_bioset(bio->bi_bdev, 0,
drivers/md/bcache/request.c
1053
s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
drivers/md/bcache/request.c
1056
bio->bi_end_io = backing_request_endio;
drivers/md/bcache/request.c
1057
closure_bio_submit(s->iop.c, bio, cl);
drivers/md/bcache/request.c
1068
struct bio *bio = &s->bio.bio;
drivers/md/bcache/request.c
1074
bio->bi_end_io = backing_request_endio;
drivers/md/bcache/request.c
1075
closure_bio_submit(s->iop.c, bio, cl);
drivers/md/bcache/request.c
1080
static void detached_dev_end_io(struct bio *bio)
drivers/md/bcache/request.c
1083
container_of(bio, struct detached_dev_io_private, bio);
drivers/md/bcache/request.c
1084
struct bio *orig_bio = ddip->orig_bio;
drivers/md/bcache/request.c
1089
if (bio->bi_status) {
drivers/md/bcache/request.c
1090
struct cached_dev *dc = bio->bi_private;
drivers/md/bcache/request.c
1093
bch_count_backing_io_errors(dc, bio);
drivers/md/bcache/request.c
1094
orig_bio->bi_status = bio->bi_status;
drivers/md/bcache/request.c
1097
bio_put(bio);
drivers/md/bcache/request.c
1102
struct bio *orig_bio, unsigned long start_time)
drivers/md/bcache/request.c
1106
struct bio *clone_bio;
drivers/md/bcache/request.c
111
struct bio *bio = op->bio;
drivers/md/bcache/request.c
1118
ddip = container_of(clone_bio, struct detached_dev_io_private, bio);
drivers/md/bcache/request.c
114
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
drivers/md/bcache/request.c
116
while (bio_sectors(bio)) {
drivers/md/bcache/request.c
117
unsigned int sectors = min(bio_sectors(bio),
drivers/md/bcache/request.c
1170
void cached_dev_submit_bio(struct bio *bio)
drivers/md/bcache/request.c
1173
struct block_device *orig_bdev = bio->bi_bdev;
drivers/md/bcache/request.c
1177
int rw = bio_data_dir(bio);
drivers/md/bcache/request.c
1181
bio->bi_status = BLK_STS_IOERR;
drivers/md/bcache/request.c
1182
bio_endio(bio);
drivers/md/bcache/request.c
1201
start_time = bio_start_io_acct(bio);
drivers/md/bcache/request.c
1203
bio->bi_iter.bi_sector += dc->sb.data_offset;
drivers/md/bcache/request.c
1206
bio_set_dev(bio, dc->bdev);
drivers/md/bcache/request.c
1207
s = search_alloc(bio, d, orig_bdev, start_time);
drivers/md/bcache/request.c
1208
trace_bcache_request_start(s->d, bio);
drivers/md/bcache/request.c
1210
if (!bio->bi_iter.bi_size) {
drivers/md/bcache/request.c
1219
s->iop.bypass = check_should_bypass(dc, bio);
drivers/md/bcache/request.c
1228
detached_dev_do_request(d, bio, start_time);
drivers/md/bcache/request.c
123
bio->bi_iter.bi_sector += sectors;
drivers/md/bcache/request.c
124
bio->bi_iter.bi_size -= sectors << 9;
drivers/md/bcache/request.c
1253
struct bio *bio, unsigned int sectors)
drivers/md/bcache/request.c
1255
unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
drivers/md/bcache/request.c
1257
swap(bio->bi_iter.bi_size, bytes);
drivers/md/bcache/request.c
1258
zero_fill_bio(bio);
drivers/md/bcache/request.c
1259
swap(bio->bi_iter.bi_size, bytes);
drivers/md/bcache/request.c
1261
bio_advance(bio, bytes);
drivers/md/bcache/request.c
1263
if (!bio->bi_iter.bi_size)
drivers/md/bcache/request.c
1279
void flash_dev_submit_bio(struct bio *bio)
drivers/md/bcache/request.c
128
bio->bi_iter.bi_sector,
drivers/md/bcache/request.c
1283
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
drivers/md/bcache/request.c
1286
bio->bi_status = BLK_STS_IOERR;
drivers/md/bcache/request.c
1287
bio_endio(bio);
drivers/md/bcache/request.c
1291
s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
drivers/md/bcache/request.c
1293
bio = &s->bio.bio;
drivers/md/bcache/request.c
1295
trace_bcache_request_start(s->d, bio);
drivers/md/bcache/request.c
1297
if (!bio->bi_iter.bi_size) {
drivers/md/bcache/request.c
1305
} else if (bio_data_dir(bio)) {
drivers/md/bcache/request.c
1307
&KEY(d->id, bio->bi_iter.bi_sector, 0),
drivers/md/bcache/request.c
1308
&KEY(d->id, bio_end_sector(bio), 0));
drivers/md/bcache/request.c
1310
s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
drivers/md/bcache/request.c
1312
s->iop.bio = bio;
drivers/md/bcache/request.c
134
bio_put(bio);
drivers/md/bcache/request.c
169
static void bch_data_insert_endio(struct bio *bio)
drivers/md/bcache/request.c
171
struct closure *cl = bio->bi_private;
drivers/md/bcache/request.c
174
if (bio->bi_status) {
drivers/md/bcache/request.c
177
op->status = bio->bi_status;
drivers/md/bcache/request.c
184
bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
drivers/md/bcache/request.c
190
struct bio *bio = op->bio, *n;
drivers/md/bcache/request.c
195
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
drivers/md/bcache/request.c
202
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
drivers/md/bcache/request.c
220
SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
drivers/md/bcache/request.c
222
if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
drivers/md/bcache/request.c
227
n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
drivers/md/bcache/request.c
249
} while (n != bio);
drivers/md/bcache/request.c
279
bio_put(bio);
drivers/md/bcache/request.c
312
trace_bcache_write(op->c, op->inode, op->bio,
drivers/md/bcache/request.c
316
bio_get(op->bio);
drivers/md/bcache/request.c
363
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
drivers/md/bcache/request.c
372
(bio_op(bio) == REQ_OP_DISCARD))
drivers/md/bcache/request.c
392
op_is_write(bio_op(bio))))
drivers/md/bcache/request.c
40
static void bio_csum(struct bio *bio, struct bkey *k)
drivers/md/bcache/request.c
405
if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
drivers/md/bcache/request.c
406
if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
drivers/md/bcache/request.c
411
if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
drivers/md/bcache/request.c
412
bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
drivers/md/bcache/request.c
430
hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
drivers/md/bcache/request.c
431
if (i->last == bio->bi_iter.bi_sector &&
drivers/md/bcache/request.c
440
if (i->sequential + bio->bi_iter.bi_size > i->sequential)
drivers/md/bcache/request.c
441
i->sequential += bio->bi_iter.bi_size;
drivers/md/bcache/request.c
443
i->last = bio_end_sector(bio);
drivers/md/bcache/request.c
458
trace_bcache_bypass_sequential(bio);
drivers/md/bcache/request.c
46
bio_for_each_segment(bv, bio, iter) {
drivers/md/bcache/request.c
463
trace_bcache_bypass_congested(bio);
drivers/md/bcache/request.c
468
bch_rescale_priorities(c, bio_sectors(bio));
drivers/md/bcache/request.c
471
bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
drivers/md/bcache/request.c
481
struct bbio bio;
drivers/md/bcache/request.c
482
struct bio *orig_bio;
drivers/md/bcache/request.c
483
struct bio *cache_miss;
drivers/md/bcache/request.c
499
static void bch_cache_read_endio(struct bio *bio)
drivers/md/bcache/request.c
501
struct bbio *b = container_of(bio, struct bbio, bio);
drivers/md/bcache/request.c
502
struct closure *cl = bio->bi_private;
drivers/md/bcache/request.c
512
if (bio->bi_status)
drivers/md/bcache/request.c
513
s->iop.status = bio->bi_status;
drivers/md/bcache/request.c
520
bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
drivers/md/bcache/request.c
530
struct bio *n, *bio = &s->bio.bio;
drivers/md/bcache/request.c
534
if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
drivers/md/bcache/request.c
538
KEY_START(k) > bio->bi_iter.bi_sector) {
drivers/md/bcache/request.c
539
unsigned int bio_sectors = bio_sectors(bio);
drivers/md/bcache/request.c
542
KEY_START(k) - bio->bi_iter.bi_sector)
drivers/md/bcache/request.c
544
int ret = s->d->cache_miss(b, s, bio, sectors);
drivers/md/bcache/request.c
564
n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
drivers/md/bcache/request.c
565
KEY_OFFSET(k) - bio->bi_iter.bi_sector),
drivers/md/bcache/request.c
568
bio_key = &container_of(n, struct bbio, bio)->key;
drivers/md/bcache/request.c
589
return n == bio ? MAP_DONE : MAP_CONTINUE;
drivers/md/bcache/request.c
595
struct bio *bio = &s->bio.bio;
drivers/md/bcache/request.c
602
&KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
drivers/md/bcache/request.c
635
static void request_endio(struct bio *bio)
drivers/md/bcache/request.c
637
struct closure *cl = bio->bi_private;
drivers/md/bcache/request.c
639
if (bio->bi_status) {
drivers/md/bcache/request.c
642
s->iop.status = bio->bi_status;
drivers/md/bcache/request.c
647
bio_put(bio);
drivers/md/bcache/request.c
651
static void backing_request_endio(struct bio *bio)
drivers/md/bcache/request.c
653
struct closure *cl = bio->bi_private;
drivers/md/bcache/request.c
655
if (bio->bi_status) {
drivers/md/bcache/request.c
667
bio->bi_opf & REQ_PREFLUSH)) {
drivers/md/bcache/request.c
669
dc->bdev, bio->bi_status);
drivers/md/bcache/request.c
672
s->iop.status = bio->bi_status;
drivers/md/bcache/request.c
676
bch_count_backing_io_errors(dc, bio);
drivers/md/bcache/request.c
679
bio_put(bio);
drivers/md/bcache/request.c
697
struct bio *orig_bio,
drivers/md/bcache/request.c
700
struct bio *bio = &s->bio.bio;
drivers/md/bcache/request.c
702
bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
drivers/md/bcache/request.c
709
bio->bi_end_io = end_io_fn;
drivers/md/bcache/request.c
710
bio->bi_private = &s->cl;
drivers/md/bcache/request.c
712
bio_cnt_set(bio, 3);
drivers/md/bcache/request.c
721
if (s->iop.bio)
drivers/md/bcache/request.c
722
bio_put(s->iop.bio);
drivers/md/bcache/request.c
729
static inline struct search *search_alloc(struct bio *bio,
drivers/md/bcache/request.c
738
do_bio_hook(s, bio, request_endio);
drivers/md/bcache/request.c
741
s->orig_bio = bio;
drivers/md/bcache/request.c
746
s->write = op_is_write(bio_op(bio));
drivers/md/bcache/request.c
752
s->iop.bio = NULL;
drivers/md/bcache/request.c
758
s->iop.flush_journal = op_is_flush(bio->bi_opf);
drivers/md/bcache/request.c
784
if (s->iop.bio)
drivers/md/bcache/request.c
785
bio_free_pages(s->iop.bio);
drivers/md/bcache/request.c
793
struct bio *bio = &s->bio.bio;
drivers/md/bcache/request.c
812
closure_bio_submit(s->iop.c, bio, cl);
drivers/md/bcache/request.c
826
if (s->iop.bio)
drivers/md/bcache/request.c
827
bio_free_pages(s->iop.bio);
drivers/md/bcache/request.c
846
if (s->iop.bio) {
drivers/md/bcache/request.c
847
bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
drivers/md/bcache/request.c
848
s->iop.bio->bi_iter.bi_sector =
drivers/md/bcache/request.c
850
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
drivers/md/bcache/request.c
851
bio_clone_blkg_association(s->iop.bio, s->cache_miss);
drivers/md/bcache/request.c
852
bch_bio_map(s->iop.bio, NULL);
drivers/md/bcache/request.c
854
bio_copy_data(s->cache_miss, s->iop.bio);
drivers/md/bcache/request.c
866
if (s->iop.bio &&
drivers/md/bcache/request.c
886
else if (s->iop.bio || verify(dc))
drivers/md/bcache/request.c
893
struct bio *bio, unsigned int sectors)
drivers/md/bcache/request.c
897
struct bio *miss, *cache_bio;
drivers/md/bcache/request.c
903
miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
drivers/md/bcache/request.c
904
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
drivers/md/bcache/request.c
911
s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
drivers/md/bcache/request.c
914
bio->bi_iter.bi_sector + s->insert_bio_sectors,
drivers/md/bcache/request.c
923
miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
drivers/md/bcache/request.c
927
ret = miss == bio ? MAP_DONE : -EINTR;
drivers/md/bcache/request.c
946
s->iop.bio = cache_bio;
drivers/md/bcache/request.c
984
struct bio *bio = &s->bio.bio;
drivers/md/bcache/request.c
985
struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
drivers/md/bcache/request.c
986
struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
drivers/md/bcache/request.h
40
void cached_dev_submit_bio(struct bio *bio);
drivers/md/bcache/request.h
43
void flash_dev_submit_bio(struct bio *bio);
drivers/md/bcache/request.h
8
struct bio *bio;
drivers/md/bcache/super.c
1941
if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
drivers/md/bcache/super.c
2247
bio_init_inline(&ca->journal.bio, NULL, 8, 0);
drivers/md/bcache/super.c
279
static void write_bdev_super_endio(struct bio *bio)
drivers/md/bcache/super.c
281
struct cached_dev *dc = bio->bi_private;
drivers/md/bcache/super.c
283
if (bio->bi_status)
drivers/md/bcache/super.c
284
bch_count_backing_io_errors(dc, bio);
drivers/md/bcache/super.c
290
struct bio *bio)
drivers/md/bcache/super.c
294
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
drivers/md/bcache/super.c
295
bio->bi_iter.bi_sector = SB_SECTOR;
drivers/md/bcache/super.c
296
bio_add_virt_nofail(bio, out, SB_SIZE);
drivers/md/bcache/super.c
326
submit_bio(bio);
drivers/md/bcache/super.c
339
struct bio *bio = &dc->sb_bio;
drivers/md/bcache/super.c
344
bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
drivers/md/bcache/super.c
345
bio->bi_end_io = write_bdev_super_endio;
drivers/md/bcache/super.c
346
bio->bi_private = dc;
drivers/md/bcache/super.c
350
__write_super(&dc->sb, dc->sb_disk, bio);
drivers/md/bcache/super.c
355
static void write_super_endio(struct bio *bio)
drivers/md/bcache/super.c
357
struct cache *ca = bio->bi_private;
drivers/md/bcache/super.c
360
bch_count_io_errors(ca, bio->bi_status, 0,
drivers/md/bcache/super.c
376
struct bio *bio = &ca->sb_bio;
drivers/md/bcache/super.c
387
bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
drivers/md/bcache/super.c
388
bio->bi_end_io = write_super_endio;
drivers/md/bcache/super.c
389
bio->bi_private = ca;
drivers/md/bcache/super.c
392
__write_super(&ca->sb, ca->sb_disk, bio);
drivers/md/bcache/super.c
399
static void uuid_endio(struct bio *bio)
drivers/md/bcache/super.c
401
struct closure *cl = bio->bi_private;
drivers/md/bcache/super.c
404
cache_set_err_on(bio->bi_status, c, "accessing uuids");
drivers/md/bcache/super.c
405
bch_bbio_free(bio, c);
drivers/md/bcache/super.c
429
struct bio *bio = bch_bbio_alloc(c);
drivers/md/bcache/super.c
431
bio->bi_opf = opf | REQ_SYNC | REQ_META;
drivers/md/bcache/super.c
432
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
drivers/md/bcache/super.c
434
bio->bi_end_io = uuid_endio;
drivers/md/bcache/super.c
435
bio->bi_private = cl;
drivers/md/bcache/super.c
436
bch_bio_map(bio, c->uuids);
drivers/md/bcache/super.c
438
bch_submit_bbio(bio, c, k, i);
drivers/md/bcache/super.c
581
static void prio_endio(struct bio *bio)
drivers/md/bcache/super.c
583
struct cache *ca = bio->bi_private;
drivers/md/bcache/super.c
585
cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
drivers/md/bcache/super.c
586
bch_bbio_free(bio, ca->set);
drivers/md/bcache/super.c
593
struct bio *bio = bch_bbio_alloc(ca->set);
drivers/md/bcache/super.c
597
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
drivers/md/bcache/super.c
598
bio_set_dev(bio, ca->bdev);
drivers/md/bcache/super.c
599
bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);
drivers/md/bcache/super.c
601
bio->bi_end_io = prio_endio;
drivers/md/bcache/super.c
602
bio->bi_private = ca;
drivers/md/bcache/super.c
603
bio->bi_opf = opf | REQ_SYNC | REQ_META;
drivers/md/bcache/super.c
604
bch_bio_map(bio, ca->disk_buckets);
drivers/md/bcache/super.c
606
closure_bio_submit(ca->set, bio, &ca->prio);
drivers/md/bcache/super.c
949
if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
drivers/md/bcache/super.c
954
offsetof(struct detached_dev_io_private, bio),
drivers/md/bcache/util.c
231
void bch_bio_map(struct bio *bio, void *base)
drivers/md/bcache/util.c
233
size_t size = bio->bi_iter.bi_size;
drivers/md/bcache/util.c
234
struct bio_vec *bv = bio->bi_io_vec;
drivers/md/bcache/util.c
236
BUG_ON(!bio->bi_iter.bi_size);
drivers/md/bcache/util.c
237
BUG_ON(bio->bi_vcnt);
drivers/md/bcache/util.c
242
for (; size; bio->bi_vcnt++, bv++) {
drivers/md/bcache/util.c
268
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
drivers/md/bcache/util.c
277
for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) {
drivers/md/bcache/util.c
280
while (--bv >= bio->bi_io_vec)
drivers/md/bcache/util.h
558
void bch_bio_map(struct bio *bio, void *base);
drivers/md/bcache/util.h
559
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
drivers/md/bcache/writeback.c
326
struct bio bio;
drivers/md/bcache/writeback.c
332
struct bio *bio = &io->bio;
drivers/md/bcache/writeback.c
334
bio_init_inline(bio, NULL,
drivers/md/bcache/writeback.c
337
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
drivers/md/bcache/writeback.c
339
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
drivers/md/bcache/writeback.c
340
bio->bi_private = w;
drivers/md/bcache/writeback.c
341
bch_bio_map(bio, NULL);
drivers/md/bcache/writeback.c
354
struct keybuf_key *w = io->bio.bi_private;
drivers/md/bcache/writeback.c
357
bio_free_pages(&io->bio);
drivers/md/bcache/writeback.c
390
static void dirty_endio(struct bio *bio)
drivers/md/bcache/writeback.c
392
struct keybuf_key *w = bio->bi_private;
drivers/md/bcache/writeback.c
395
if (bio->bi_status) {
drivers/md/bcache/writeback.c
397
bch_count_backing_io_errors(io->dc, bio);
drivers/md/bcache/writeback.c
406
struct keybuf_key *w = io->bio.bi_private;
drivers/md/bcache/writeback.c
437
io->bio.bi_opf = REQ_OP_WRITE;
drivers/md/bcache/writeback.c
438
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
drivers/md/bcache/writeback.c
439
bio_set_dev(&io->bio, io->dc->bdev);
drivers/md/bcache/writeback.c
440
io->bio.bi_end_io = dirty_endio;
drivers/md/bcache/writeback.c
443
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
drivers/md/bcache/writeback.c
452
static void read_dirty_endio(struct bio *bio)
drivers/md/bcache/writeback.c
454
struct keybuf_key *w = bio->bi_private;
drivers/md/bcache/writeback.c
459
bio->bi_status, 1,
drivers/md/bcache/writeback.c
462
dirty_endio(bio);
drivers/md/bcache/writeback.c
469
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
drivers/md/bcache/writeback.c
550
io->bio.bi_opf = REQ_OP_READ;
drivers/md/bcache/writeback.c
551
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
drivers/md/bcache/writeback.c
552
bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
drivers/md/bcache/writeback.c
553
io->bio.bi_end_io = read_dirty_endio;
drivers/md/bcache/writeback.c
555
if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
drivers/md/bcache/writeback.h
102
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
drivers/md/bcache/writeback.h
112
if (bio_op(bio) == REQ_OP_DISCARD)
drivers/md/bcache/writeback.h
116
bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
drivers/md/bcache/writeback.h
117
bio_sectors(bio)))
drivers/md/bcache/writeback.h
123
return (op_is_sync(bio->bi_opf) ||
drivers/md/bcache/writeback.h
124
bio->bi_opf & (REQ_META|REQ_PRIO) ||
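
should_writeback() in writeback.h above gates the writeback path on the bio's own flags once discards and already-bypassed bios are excluded. A condensed sketch of just that flag test (the real function additionally consults cache mode and dirty-stripe state, omitted here):

/*
 * Condensed flag test modelled on bcache's should_writeback(); sync
 * and metadata/priority writes favour the writeback path.
 */
#include <linux/bio.h>

static bool demo_should_writeback(struct bio *bio, bool bypass)
{
        if (bypass || bio_op(bio) == REQ_OP_DISCARD)
                return false;

        return op_is_sync(bio->bi_opf) ||
               (bio->bi_opf & (REQ_META | REQ_PRIO));
}
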
drivers/md/dm-audit.c
70
struct bio *bio, sector_t sector, int result)
drivers/md/dm-audit.c
73
int dev_major = MAJOR(bio->bi_bdev->bd_dev);
drivers/md/dm-audit.c
74
int dev_minor = MINOR(bio->bi_bdev->bd_dev);
drivers/md/dm-audit.h
18
struct bio *bio, sector_t sector, int result);
drivers/md/dm-audit.h
46
struct bio *bio, sector_t sector,
drivers/md/dm-bio-prison-v1.c
140
struct bio *inmate,
drivers/md/dm-bio-prison-v1.c
177
struct bio *inmate,
drivers/md/dm-bio-prison-v1.c
193
struct bio *inmate,
drivers/md/dm-bio-prison-v1.c
257
struct bio *bio;
drivers/md/dm-bio-prison-v1.c
262
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-bio-prison-v1.c
263
bio->bi_status = error;
drivers/md/dm-bio-prison-v1.c
264
bio_endio(bio);
drivers/md/dm-bio-prison-v1.c
88
struct bio *holder,
drivers/md/dm-bio-prison-v1.h
56
struct bio *holder;
drivers/md/dm-bio-prison-v1.h
88
struct bio *inmate,
drivers/md/dm-bio-prison-v2.c
153
struct bio *inmate,
drivers/md/dm-bio-prison-v2.c
176
struct bio *inmate,
drivers/md/dm-bio-prison-v2.h
91
struct bio *inmate,
drivers/md/dm-bio-record.h
33
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
drivers/md/dm-bio-record.h
35
bd->bi_bdev = bio->bi_bdev;
drivers/md/dm-bio-record.h
36
bd->bi_flags = bio->bi_flags;
drivers/md/dm-bio-record.h
37
bd->bi_iter = bio->bi_iter;
drivers/md/dm-bio-record.h
38
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
drivers/md/dm-bio-record.h
39
bd->bi_end_io = bio->bi_end_io;
drivers/md/dm-bio-record.h
41
bd->bi_integrity = bio_integrity(bio);
drivers/md/dm-bio-record.h
45
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
drivers/md/dm-bio-record.h
47
bio->bi_bdev = bd->bi_bdev;
drivers/md/dm-bio-record.h
48
bio->bi_flags = bd->bi_flags;
drivers/md/dm-bio-record.h
49
bio->bi_iter = bd->bi_iter;
drivers/md/dm-bio-record.h
50
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
drivers/md/dm-bio-record.h
51
bio->bi_end_io = bd->bi_end_io;
drivers/md/dm-bio-record.h
53
bio->bi_integrity = bd->bi_integrity;
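
dm_bio_record()/dm_bio_restore() above snapshot and rewind a bio's iterator, device, flags, and end_io so a target can remap, fail, and retry the same bio elsewhere. A usage sketch (demo_map()/demo_retry() and the per-bio struct are hypothetical; the record/restore calls are the header's API):

/*
 * Save the bio before remapping so a failed submission can be replayed
 * against another device. The demo_* names are hypothetical.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include "dm-bio-record.h"

struct demo_per_bio {
        struct dm_bio_details details;
};

static void demo_map(struct demo_per_bio *pb, struct bio *bio,
                     struct block_device *mirror)
{
        dm_bio_record(&pb->details, bio);       /* snapshot iter/bdev/end_io */
        bio_set_dev(bio, mirror);
        submit_bio_noacct(bio);
}

static void demo_retry(struct demo_per_bio *pb, struct bio *bio,
                       struct block_device *other)
{
        dm_bio_restore(&pb->details, bio);      /* rewind to the snapshot */
        bio->bi_status = BLK_STS_OK;
        bio_set_dev(bio, other);
        submit_bio_noacct(bio);
}
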
drivers/md/dm-bufio.c
1327
static void bio_complete(struct bio *bio)
drivers/md/dm-bufio.c
1329
struct dm_buffer *b = bio->bi_private;
drivers/md/dm-bufio.c
1330
blk_status_t status = bio->bi_status;
drivers/md/dm-bufio.c
1332
bio_uninit(bio);
drivers/md/dm-bufio.c
1333
kfree(bio);
drivers/md/dm-bufio.c
1341
struct bio *bio;
drivers/md/dm-bufio.c
1345
bio = bio_kmalloc(1, GFP_NOWAIT);
drivers/md/dm-bufio.c
1346
if (!bio) {
drivers/md/dm-bufio.c
1350
bio_init_inline(bio, b->c->bdev, 1, op);
drivers/md/dm-bufio.c
1351
bio->bi_iter.bi_sector = sector;
drivers/md/dm-bufio.c
1352
bio->bi_end_io = bio_complete;
drivers/md/dm-bufio.c
1353
bio->bi_private = b;
drivers/md/dm-bufio.c
1354
bio->bi_ioprio = ioprio;
drivers/md/dm-bufio.c
1359
bio_add_virt_nofail(bio, ptr, len);
drivers/md/dm-bufio.c
1361
submit_bio(bio);
drivers/md/dm-cache-target.c
1009
static bool discard_or_flush(struct bio *bio)
drivers/md/dm-cache-target.c
1011
return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
drivers/md/dm-cache-target.c
1014
static void calc_discard_block_range(struct cache *cache, struct bio *bio,
drivers/md/dm-cache-target.c
1017
sector_t sb = bio->bi_iter.bi_sector;
drivers/md/dm-cache-target.c
1018
sector_t se = bio_end_sector(bio);
drivers/md/dm-cache-target.c
1064
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1066
return (bio_data_dir(bio) == WRITE) &&
drivers/md/dm-cache-target.c
1067
(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
drivers/md/dm-cache-target.c
107
struct bio *bio;
drivers/md/dm-cache-target.c
1070
static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
drivers/md/dm-cache-target.c
1073
(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
drivers/md/dm-cache-target.c
1119
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1121
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
1128
static void overwrite_endio(struct bio *bio)
drivers/md/dm-cache-target.c
1130
struct dm_cache_migration *mg = bio->bi_private;
drivers/md/dm-cache-target.c
1132
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
1134
dm_unhook_bio(&pb->hook_info, bio);
drivers/md/dm-cache-target.c
1136
if (bio->bi_status)
drivers/md/dm-cache-target.c
1137
mg->k.input = bio->bi_status;
drivers/md/dm-cache-target.c
1145
struct bio *bio = mg->overwrite_bio;
drivers/md/dm-cache-target.c
1146
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
1148
dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
drivers/md/dm-cache-target.c
1155
remap_to_cache(mg->cache, bio, mg->op->cblock);
drivers/md/dm-cache-target.c
1157
remap_to_origin(mg->cache, bio);
drivers/md/dm-cache-target.c
1160
accounted_request(mg->cache, bio);
drivers/md/dm-cache-target.c
132
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-cache-target.c
134
bio->bi_status = r;
drivers/md/dm-cache-target.c
135
bio_endio(bio);
drivers/md/dm-cache-target.c
137
b->issue_op(bio, b->issue_context);
drivers/md/dm-cache-target.c
1433
static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
drivers/md/dm-cache-target.c
144
void (*issue_op)(struct bio *bio, void *),
drivers/md/dm-cache-target.c
1445
mg->overwrite_bio = bio;
drivers/md/dm-cache-target.c
1447
if (!bio)
drivers/md/dm-cache-target.c
1565
dm_oblock_t oblock, struct bio *bio)
drivers/md/dm-cache-target.c
1574
mg->overwrite_bio = bio;
drivers/md/dm-cache-target.c
1604
static void inc_hit_counter(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1606
atomic_inc(bio_data_dir(bio) == READ ?
drivers/md/dm-cache-target.c
1610
static void inc_miss_counter(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1612
atomic_inc(bio_data_dir(bio) == READ ?
drivers/md/dm-cache-target.c
1618
static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
drivers/md/dm-cache-target.c
1627
rb = bio_detain_shared(cache, block, bio);
drivers/md/dm-cache-target.c
1639
data_dir = bio_data_dir(bio);
drivers/md/dm-cache-target.c
1641
if (optimisable_bio(cache, bio, block)) {
drivers/md/dm-cache-target.c
1648
bio_io_error(bio);
drivers/md/dm-cache-target.c
1653
bio_drop_shared_lock(cache, bio);
drivers/md/dm-cache-target.c
1655
mg_start(cache, op, bio);
drivers/md/dm-cache-target.c
1663
bio_io_error(bio);
drivers/md/dm-cache-target.c
1672
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
1677
inc_miss_counter(cache, bio);
drivers/md/dm-cache-target.c
1679
accounted_begin(cache, bio);
drivers/md/dm-cache-target.c
1680
remap_to_origin_clear_discard(cache, bio, block);
drivers/md/dm-cache-target.c
1686
bio_endio(bio);
drivers/md/dm-cache-target.c
1693
inc_hit_counter(cache, bio);
drivers/md/dm-cache-target.c
1700
if (bio_data_dir(bio) == WRITE) {
drivers/md/dm-cache-target.c
1701
bio_drop_shared_lock(cache, bio);
drivers/md/dm-cache-target.c
1703
invalidate_start(cache, cblock, block, bio);
drivers/md/dm-cache-target.c
1705
remap_to_origin_clear_discard(cache, bio, block);
drivers/md/dm-cache-target.c
1707
if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
drivers/md/dm-cache-target.c
1709
remap_to_origin_and_cache(cache, bio, block, cblock);
drivers/md/dm-cache-target.c
1710
accounted_begin(cache, bio);
drivers/md/dm-cache-target.c
1712
remap_to_cache_dirty(cache, bio, block, cblock);
drivers/md/dm-cache-target.c
1719
if (bio->bi_opf & REQ_FUA) {
drivers/md/dm-cache-target.c
1724
accounted_complete(cache, bio);
drivers/md/dm-cache-target.c
1725
issue_after_commit(&cache->committer, bio);
drivers/md/dm-cache-target.c
1733
static bool process_bio(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1737
if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
drivers/md/dm-cache-target.c
1738
dm_submit_bio_remap(bio, NULL);
drivers/md/dm-cache-target.c
1776
static bool process_flush_bio(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1778
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
1781
remap_to_origin(cache, bio);
drivers/md/dm-cache-target.c
1783
remap_to_cache(cache, bio, 0);
drivers/md/dm-cache-target.c
1785
issue_after_commit(&cache->committer, bio);
drivers/md/dm-cache-target.c
1789
static bool process_discard_bio(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
1798
calc_discard_block_range(cache, bio, &b, &e);
drivers/md/dm-cache-target.c
1805
remap_to_origin(cache, bio);
drivers/md/dm-cache-target.c
1806
dm_submit_bio_remap(bio, NULL);
drivers/md/dm-cache-target.c
1808
bio_endio(bio);
drivers/md/dm-cache-target.c
1819
struct bio *bio;
drivers/md/dm-cache-target.c
182
static void issue_after_commit(struct batcher *b, struct bio *bio)
drivers/md/dm-cache-target.c
1827
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-cache-target.c
1828
if (bio->bi_opf & REQ_PREFLUSH)
drivers/md/dm-cache-target.c
1829
commit_needed = process_flush_bio(cache, bio) || commit_needed;
drivers/md/dm-cache-target.c
1831
else if (bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-cache-target.c
1832
commit_needed = process_discard_bio(cache, bio) || commit_needed;
drivers/md/dm-cache-target.c
1835
commit_needed = process_bio(cache, bio) || commit_needed;
drivers/md/dm-cache-target.c
1850
struct bio *bio;
drivers/md/dm-cache-target.c
1856
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-cache-target.c
1857
bio->bi_status = BLK_STS_DM_REQUEUE;
drivers/md/dm-cache-target.c
1858
bio_endio(bio);
drivers/md/dm-cache-target.c
188
bio_list_add(&b->bios, bio);
drivers/md/dm-cache-target.c
220
static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
drivers/md/dm-cache-target.c
223
h->bi_end_io = bio->bi_end_io;
drivers/md/dm-cache-target.c
225
bio->bi_end_io = bi_end_io;
drivers/md/dm-cache-target.c
226
bio->bi_private = bi_private;
drivers/md/dm-cache-target.c
229
static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
drivers/md/dm-cache-target.c
231
bio->bi_end_io = h->bi_end_io;
drivers/md/dm-cache-target.c
2644
static int cache_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-cache-target.c
2650
dm_oblock_t block = get_bio_block(cache, bio);
drivers/md/dm-cache-target.c
2652
init_per_bio_data(bio);
drivers/md/dm-cache-target.c
2659
remap_to_origin(cache, bio);
drivers/md/dm-cache-target.c
2660
accounted_begin(cache, bio);
drivers/md/dm-cache-target.c
2664
if (discard_or_flush(bio)) {
drivers/md/dm-cache-target.c
2665
defer_bio(cache, bio);
drivers/md/dm-cache-target.c
2669
r = map_bio(cache, bio, block, &commit_needed);
drivers/md/dm-cache-target.c
2676
static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
drivers/md/dm-cache-target.c
2680
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
2690
bio_drop_shared_lock(cache, bio);
drivers/md/dm-cache-target.c
2691
accounted_complete(cache, bio);
drivers/md/dm-cache-target.c
430
struct bio *overwrite_bio;
drivers/md/dm-cache-target.c
527
static unsigned int lock_level(struct bio *bio)
drivers/md/dm-cache-target.c
529
return bio_data_dir(bio) == WRITE ?
drivers/md/dm-cache-target.c
540
static struct per_bio_data *get_per_bio_data(struct bio *bio)
drivers/md/dm-cache-target.c
542
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
drivers/md/dm-cache-target.c
548
static struct per_bio_data *init_per_bio_data(struct bio *bio)
drivers/md/dm-cache-target.c
550
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
553
pb->req_nr = dm_bio_get_target_bio_nr(bio);
drivers/md/dm-cache-target.c
562
static void defer_bio(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
565
bio_list_add(&cache->deferred_bios, bio);
drivers/md/dm-cache-target.c
582
static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
drivers/md/dm-cache-target.c
593
r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
drivers/md/dm-cache-target.c
605
pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
724
static void remap_to_origin(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
726
bio_set_dev(bio, cache->origin_dev->bdev);
drivers/md/dm-cache-target.c
729
static void remap_to_cache(struct cache *cache, struct bio *bio,
drivers/md/dm-cache-target.c
732
sector_t bi_sector = bio->bi_iter.bi_sector;
drivers/md/dm-cache-target.c
735
bio_set_dev(bio, cache->cache_dev->bdev);
drivers/md/dm-cache-target.c
737
bio->bi_iter.bi_sector =
drivers/md/dm-cache-target.c
741
bio->bi_iter.bi_sector =
drivers/md/dm-cache-target.c
746
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
751
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
drivers/md/dm-cache-target.c
752
bio_op(bio) != REQ_OP_DISCARD) {
drivers/md/dm-cache-target.c
753
pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
760
static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
drivers/md/dm-cache-target.c
764
check_if_tick_bio_needed(cache, bio);
drivers/md/dm-cache-target.c
765
remap_to_origin(cache, bio);
drivers/md/dm-cache-target.c
766
if (bio_data_dir(bio) == WRITE)
drivers/md/dm-cache-target.c
770
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
drivers/md/dm-cache-target.c
773
check_if_tick_bio_needed(cache, bio);
drivers/md/dm-cache-target.c
774
remap_to_cache(cache, bio, cblock);
drivers/md/dm-cache-target.c
775
if (bio_data_dir(bio) == WRITE) {
drivers/md/dm-cache-target.c
781
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
783
sector_t block_nr = bio->bi_iter.bi_sector;
drivers/md/dm-cache-target.c
793
static bool accountable_bio(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
795
return bio_op(bio) != REQ_OP_DISCARD;
drivers/md/dm-cache-target.c
798
static void accounted_begin(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
802
if (accountable_bio(cache, bio)) {
drivers/md/dm-cache-target.c
803
pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
804
pb->len = bio_sectors(bio);
drivers/md/dm-cache-target.c
809
static void accounted_complete(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
811
struct per_bio_data *pb = get_per_bio_data(bio);
drivers/md/dm-cache-target.c
816
static void accounted_request(struct cache *cache, struct bio *bio)
drivers/md/dm-cache-target.c
818
accounted_begin(cache, bio);
drivers/md/dm-cache-target.c
819
dm_submit_bio_remap(bio, NULL);
drivers/md/dm-cache-target.c
822
static void issue_op(struct bio *bio, void *context)
drivers/md/dm-cache-target.c
826
accounted_request(cache, bio);
drivers/md/dm-cache-target.c
833
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
drivers/md/dm-cache-target.c
836
struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
drivers/md/dm-cache-target.c
84
void (*issue_op)(struct bio *bio, void *context);
drivers/md/dm-cache-target.c
841
bio_chain(origin_bio, bio);
drivers/md/dm-cache-target.c
847
remap_to_cache(cache, bio, cblock);
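
dm-cache-target.c's dm_hook_bio()/dm_unhook_bio() entries above show a completion-hijack pattern: stash the original bi_end_io and bi_private, point the bio at a private handler, and restore both when it completes. A hedged sketch of the same idea; struct hook and the function names below are illustrative, not the dm-cache symbols:

    /* Sketch of the end_io "hook" pattern used by dm-cache-target.c.
     * struct hook and the identifiers below are illustrative.
     */
    #include <linux/bio.h>

    struct hook {
            bio_end_io_t *saved_end_io;
            void *saved_private;
    };

    static void hook_end_io(struct bio *bio)
    {
            struct hook *h = bio->bi_private;

            /* restore the caller's completion state first ... */
            bio->bi_end_io = h->saved_end_io;
            bio->bi_private = h->saved_private;
            /* ... do our own accounting here ... */
            bio_endio(bio);         /* now runs the original bi_end_io */
    }

    static void hook_bio(struct hook *h, struct bio *bio)
    {
            h->saved_end_io = bio->bi_end_io;
            h->saved_private = bio->bi_private;
            bio->bi_end_io = hook_end_io;
            bio->bi_private = h;
    }
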
drivers/md/dm-clone-target.c
1178
struct bio *bio;
drivers/md/dm-clone-target.c
1194
bio_list_for_each(bio, &discards) {
drivers/md/dm-clone-target.c
1195
bio_region_range(clone, bio, &rs, &nr_regions);
drivers/md/dm-clone-target.c
1207
while ((bio = bio_list_pop(&discards)))
drivers/md/dm-clone-target.c
1208
complete_discard_bio(clone, bio, r == 0);
drivers/md/dm-clone-target.c
1228
struct bio *bio;
drivers/md/dm-clone-target.c
1250
while ((bio = bio_list_pop(&bios)))
drivers/md/dm-clone-target.c
1251
bio_io_error(bio);
drivers/md/dm-clone-target.c
1258
while ((bio = bio_list_pop(&bio_completions)))
drivers/md/dm-clone-target.c
1259
bio_endio(bio);
drivers/md/dm-clone-target.c
1261
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-clone-target.c
1262
if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
drivers/md/dm-clone-target.c
1267
bio_endio(bio);
drivers/md/dm-clone-target.c
1269
submit_bio_noacct(bio);
drivers/md/dm-clone-target.c
1314
static int clone_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-clone-target.c
1331
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/dm-clone-target.c
1332
remap_and_issue(clone, bio);
drivers/md/dm-clone-target.c
1336
bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-clone-target.c
1343
if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-clone-target.c
1344
process_discard_bio(clone, bio);
drivers/md/dm-clone-target.c
1358
region_nr = bio_to_region(clone, bio);
drivers/md/dm-clone-target.c
1360
remap_and_issue(clone, bio);
drivers/md/dm-clone-target.c
1362
} else if (bio_data_dir(bio) == READ) {
drivers/md/dm-clone-target.c
1363
remap_to_source(clone, bio);
drivers/md/dm-clone-target.c
1367
remap_to_dest(clone, bio);
drivers/md/dm-clone-target.c
1368
hydrate_bio_region(clone, bio);
drivers/md/dm-clone-target.c
1373
static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
drivers/md/dm-clone-target.c
259
static inline void remap_to_source(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
261
bio_set_dev(bio, clone->source_dev->bdev);
drivers/md/dm-clone-target.c
264
static inline void remap_to_dest(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
266
bio_set_dev(bio, clone->dest_dev->bdev);
drivers/md/dm-clone-target.c
269
static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
271
return op_is_flush(bio->bi_opf) &&
drivers/md/dm-clone-target.c
282
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
284
return (bio->bi_iter.bi_sector >> clone->region_shift);
drivers/md/dm-clone-target.c
288
static void bio_region_range(struct clone *clone, struct bio *bio,
drivers/md/dm-clone-target.c
293
*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
drivers/md/dm-clone-target.c
294
end = bio_end_sector(bio) >> clone->region_shift;
drivers/md/dm-clone-target.c
303
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
305
return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
drivers/md/dm-clone-target.c
310
struct bio *bio;
drivers/md/dm-clone-target.c
312
while ((bio = bio_list_pop(bios))) {
drivers/md/dm-clone-target.c
313
bio->bi_status = status;
drivers/md/dm-clone-target.c
314
bio_endio(bio);
drivers/md/dm-clone-target.c
320
struct bio *bio;
drivers/md/dm-clone-target.c
325
while ((bio = bio_list_pop(bios)))
drivers/md/dm-clone-target.c
326
submit_bio_noacct(bio);
drivers/md/dm-clone-target.c
339
static void issue_bio(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
341
if (!bio_triggers_commit(clone, bio)) {
drivers/md/dm-clone-target.c
342
submit_bio_noacct(bio);
drivers/md/dm-clone-target.c
351
bio_io_error(bio);
drivers/md/dm-clone-target.c
360
bio_list_add(&clone->deferred_flush_bios, bio);
drivers/md/dm-clone-target.c
372
static void remap_and_issue(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
374
remap_to_dest(clone, bio);
drivers/md/dm-clone-target.c
375
issue_bio(clone, bio);
drivers/md/dm-clone-target.c
387
struct bio *bio;
drivers/md/dm-clone-target.c
395
while ((bio = bio_list_pop(bios))) {
drivers/md/dm-clone-target.c
396
if (bio_triggers_commit(clone, bio))
drivers/md/dm-clone-target.c
397
bio_list_add(&flush_bios, bio);
drivers/md/dm-clone-target.c
399
bio_list_add(&normal_bios, bio);
drivers/md/dm-clone-target.c
410
static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
424
if (!(bio->bi_opf & REQ_FUA)) {
drivers/md/dm-clone-target.c
425
bio_endio(bio);
drivers/md/dm-clone-target.c
434
bio_io_error(bio);
drivers/md/dm-clone-target.c
443
bio_list_add(&clone->deferred_flush_completions, bio);
drivers/md/dm-clone-target.c
449
static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
drivers/md/dm-clone-target.c
451
bio->bi_iter.bi_sector = sector;
drivers/md/dm-clone-target.c
452
bio->bi_iter.bi_size = to_bytes(len);
drivers/md/dm-clone-target.c
455
static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
drivers/md/dm-clone-target.c
465
remap_to_dest(clone, bio);
drivers/md/dm-clone-target.c
466
bio_region_range(clone, bio, &rs, &nr_regions);
drivers/md/dm-clone-target.c
467
trim_bio(bio, region_to_sector(clone, rs),
drivers/md/dm-clone-target.c
469
submit_bio_noacct(bio);
drivers/md/dm-clone-target.c
471
bio_endio(bio);
drivers/md/dm-clone-target.c
474
static void process_discard_bio(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
478
bio_region_range(clone, bio, &rs, &nr_regions);
drivers/md/dm-clone-target.c
480
bio_endio(bio);
drivers/md/dm-clone-target.c
489
(unsigned long long)bio->bi_iter.bi_sector,
drivers/md/dm-clone-target.c
490
bio_sectors(bio));
drivers/md/dm-clone-target.c
491
bio_endio(bio);
drivers/md/dm-clone-target.c
500
complete_discard_bio(clone, bio, true);
drivers/md/dm-clone-target.c
510
bio_endio(bio);
drivers/md/dm-clone-target.c
518
bio_list_add(&clone->deferred_discard_bios, bio);
drivers/md/dm-clone-target.c
533
struct bio *overwrite_bio;
drivers/md/dm-clone-target.c
837
static void overwrite_endio(struct bio *bio)
drivers/md/dm-clone-target.c
839
struct dm_clone_region_hydration *hd = bio->bi_private;
drivers/md/dm-clone-target.c
841
bio->bi_end_io = hd->overwrite_bio_end_io;
drivers/md/dm-clone-target.c
842
hd->status = bio->bi_status;
drivers/md/dm-clone-target.c
847
static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
drivers/md/dm-clone-target.c
854
hd->overwrite_bio = bio;
drivers/md/dm-clone-target.c
855
hd->overwrite_bio_end_io = bio->bi_end_io;
drivers/md/dm-clone-target.c
857
bio->bi_end_io = overwrite_endio;
drivers/md/dm-clone-target.c
858
bio->bi_private = hd;
drivers/md/dm-clone-target.c
861
submit_bio_noacct(bio);
drivers/md/dm-clone-target.c
874
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
drivers/md/dm-clone-target.c
880
region_nr = bio_to_region(clone, bio);
drivers/md/dm-clone-target.c
888
bio_list_add(&hd->deferred_bios, bio);
drivers/md/dm-clone-target.c
896
issue_bio(clone, bio);
drivers/md/dm-clone-target.c
915
issue_bio(clone, bio);
drivers/md/dm-clone-target.c
922
bio_list_add(&hd2->deferred_bios, bio);
drivers/md/dm-clone-target.c
937
bio_io_error(bio);
drivers/md/dm-clone-target.c
948
if (is_overwrite_bio(clone, bio)) {
drivers/md/dm-clone-target.c
950
hydration_overwrite(hd, bio);
drivers/md/dm-clone-target.c
952
bio_list_add(&hd->deferred_bios, bio);
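
Several targets indexed here (dm-cache, dm-clone, and dm-era and dm-mpath below) share the same deferral idiom: ->map() queues bios it cannot handle immediately onto a struct bio_list under a lock, and a worker later drains the list in one batch. A minimal sketch of that idiom; the lock, list, and function names are hypothetical:

    /* Sketch: defer-and-drain with struct bio_list, as the dm targets
     * above do. deferred_lock/deferred_bios/do_work are hypothetical.
     */
    #include <linux/bio.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(deferred_lock);
    static struct bio_list deferred_bios = BIO_EMPTY_LIST;

    static void defer_bio(struct bio *bio)
    {
            spin_lock_irq(&deferred_lock);
            bio_list_add(&deferred_bios, bio);
            spin_unlock_irq(&deferred_lock);
    }

    static void do_work(void)
    {
            struct bio_list bios = BIO_EMPTY_LIST;
            struct bio *bio;

            /* take the whole list in one critical section ... */
            spin_lock_irq(&deferred_lock);
            bio_list_merge(&bios, &deferred_bios);
            bio_list_init(&deferred_bios);
            spin_unlock_irq(&deferred_lock);

            /* ... then process without holding the lock */
            while ((bio = bio_list_pop(&bios)))
                    submit_bio_noacct(bio);
    }
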
drivers/md/dm-core.h
249
struct bio clone;
drivers/md/dm-core.h
298
struct bio *orig_bio;
drivers/md/dm-crypt.c
1136
static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
drivers/md/dm-crypt.c
1142
if (!bio_sectors(bio) || !io->cc->tuple_size)
drivers/md/dm-crypt.c
1145
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
drivers/md/dm-crypt.c
1149
tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
drivers/md/dm-crypt.c
1151
bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
drivers/md/dm-crypt.c
1153
ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
drivers/md/dm-crypt.c
1210
struct bio *bio_out, struct bio *bio_in,
drivers/md/dm-crypt.c
1522
struct skcipher_request *req, struct bio *base_bio)
drivers/md/dm-crypt.c
1531
struct aead_request *req, struct bio *base_bio)
drivers/md/dm-crypt.c
1539
static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
drivers/md/dm-crypt.c
1643
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
drivers/md/dm-crypt.c
1665
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
drivers/md/dm-crypt.c
1668
struct bio *clone;
drivers/md/dm-crypt.c
1736
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
drivers/md/dm-crypt.c
1754
struct bio *bio, sector_t sector)
drivers/md/dm-crypt.c
1757
io->base_bio = bio;
drivers/md/dm-crypt.c
1782
struct bio *base_bio = io->base_bio;
drivers/md/dm-crypt.c
1827
static void crypt_endio(struct bio *clone)
drivers/md/dm-crypt.c
1863
struct bio *clone;
drivers/md/dm-crypt.c
1926
struct bio *clone = io->ctx.bio_out;
drivers/md/dm-crypt.c
1987
struct bio *clone = io->ctx.bio_out;
drivers/md/dm-crypt.c
2080
struct bio *clone;
drivers/md/dm-crypt.c
258
static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split)
drivers/md/dm-crypt.c
262
bool wrt = op_is_write(bio_op(bio));
drivers/md/dm-crypt.c
285
static void crypt_endio(struct bio *clone);
drivers/md/dm-crypt.c
3455
static int crypt_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-crypt.c
3467
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
drivers/md/dm-crypt.c
3468
bio_op(bio) == REQ_OP_DISCARD)) {
drivers/md/dm-crypt.c
3469
bio_set_dev(bio, cc->dev->bdev);
drivers/md/dm-crypt.c
3470
if (bio_sectors(bio))
drivers/md/dm-crypt.c
3471
bio->bi_iter.bi_sector = cc->start +
drivers/md/dm-crypt.c
3472
dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-crypt.c
3484
no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) ||
drivers/md/dm-crypt.c
3485
(bio->bi_opf & REQ_ATOMIC);
drivers/md/dm-crypt.c
3486
max_sectors = get_max_request_sectors(ti, bio, no_split);
drivers/md/dm-crypt.c
3487
if (unlikely(bio_sectors(bio) > max_sectors)) {
drivers/md/dm-crypt.c
3490
dm_accept_partial_bio(bio, max_sectors);
drivers/md/dm-crypt.c
3497
if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
drivers/md/dm-crypt.c
3500
if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
drivers/md/dm-crypt.c
3503
io = dm_per_bio_data(bio, cc->per_bio_data_size);
drivers/md/dm-crypt.c
3504
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
drivers/md/dm-crypt.c
3507
unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);
drivers/md/dm-crypt.c
3515
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
drivers/md/dm-crypt.c
3516
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
drivers/md/dm-crypt.c
60
struct bio *bio_in;
drivers/md/dm-crypt.c
62
struct bio *bio_out;
drivers/md/dm-crypt.c
81
struct bio *base_bio;
drivers/md/dm-delay.c
102
struct bio *bio = dm_bio_from_per_bio_data(delayed,
drivers/md/dm-delay.c
105
bio_list_add(&flush_bio_list, bio);
drivers/md/dm-delay.c
314
static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
drivers/md/dm-delay.c
322
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
drivers/md/dm-delay.c
364
static int delay_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-delay.c
368
struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
drivers/md/dm-delay.c
370
if (bio_data_dir(bio) == WRITE) {
drivers/md/dm-delay.c
371
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
drivers/md/dm-delay.c
379
bio_set_dev(bio, c->dev->bdev);
drivers/md/dm-delay.c
380
bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-delay.c
382
return delay_bio(dc, c, bio);
drivers/md/dm-delay.c
74
static void flush_bios(struct bio *bio)
drivers/md/dm-delay.c
76
struct bio *n;
drivers/md/dm-delay.c
78
while (bio) {
drivers/md/dm-delay.c
79
n = bio->bi_next;
drivers/md/dm-delay.c
80
bio->bi_next = NULL;
drivers/md/dm-delay.c
81
dm_submit_bio_remap(bio, NULL);
drivers/md/dm-delay.c
82
bio = n;
drivers/md/dm-dust.c
224
static int dust_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-dust.c
229
bio_set_dev(bio, dd->dev->bdev);
drivers/md/dm-dust.c
230
bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-dust.c
232
if (bio_data_dir(bio) == READ)
drivers/md/dm-dust.c
233
r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
drivers/md/dm-dust.c
235
r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
drivers/md/dm-ebs-target.c
122
static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
drivers/md/dm-ebs-target.c
128
bio_for_each_bvec(bv, bio, iter) {
drivers/md/dm-ebs-target.c
143
static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
drivers/md/dm-ebs-target.c
145
sector_t block, blocks, sector = bio->bi_iter.bi_sector;
drivers/md/dm-ebs-target.c
148
blocks = __nr_blocks(ec, bio);
drivers/md/dm-ebs-target.c
160
if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
drivers/md/dm-ebs-target.c
167
static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
drivers/md/dm-ebs-target.c
169
sector_t blocks, sector = bio->bi_iter.bi_sector;
drivers/md/dm-ebs-target.c
171
blocks = __nr_blocks(ec, bio);
drivers/md/dm-ebs-target.c
183
struct bio *bio;
drivers/md/dm-ebs-target.c
194
bio_list_for_each(bio, &bios) {
drivers/md/dm-ebs-target.c
195
block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
drivers/md/dm-ebs-target.c
196
if (bio_op(bio) == REQ_OP_READ)
drivers/md/dm-ebs-target.c
197
dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
drivers/md/dm-ebs-target.c
198
else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
drivers/md/dm-ebs-target.c
199
block2 = __sector_to_block(ec, bio_end_sector(bio));
drivers/md/dm-ebs-target.c
200
if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
drivers/md/dm-ebs-target.c
202
if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
drivers/md/dm-ebs-target.c
207
bio_list_for_each(bio, &bios) {
drivers/md/dm-ebs-target.c
209
if (bio_op(bio) == REQ_OP_READ)
drivers/md/dm-ebs-target.c
210
r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
drivers/md/dm-ebs-target.c
211
else if (bio_op(bio) == REQ_OP_WRITE) {
drivers/md/dm-ebs-target.c
213
r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
drivers/md/dm-ebs-target.c
214
} else if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-ebs-target.c
215
__ebs_forget_bio(ec, bio);
drivers/md/dm-ebs-target.c
216
r = __ebs_discard_bio(ec, bio);
drivers/md/dm-ebs-target.c
220
bio->bi_status = errno_to_blk_status(r);
drivers/md/dm-ebs-target.c
229
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-ebs-target.c
231
if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
drivers/md/dm-ebs-target.c
232
bio_io_error(bio);
drivers/md/dm-ebs-target.c
234
bio_endio(bio);
drivers/md/dm-ebs-target.c
361
static int ebs_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-ebs-target.c
365
bio_set_dev(bio, ec->dev->bdev);
drivers/md/dm-ebs-target.c
366
bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-ebs-target.c
368
if (unlikely(bio_op(bio) == REQ_OP_FLUSH))
drivers/md/dm-ebs-target.c
375
if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
drivers/md/dm-ebs-target.c
376
__block_mod(bio_end_sector(bio), ec->u_bs) ||
drivers/md/dm-ebs-target.c
379
bio_list_add(&ec->bios_in, bio);
drivers/md/dm-ebs-target.c
388
__ebs_forget_bio(ec, bio);
drivers/md/dm-ebs-target.c
48
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
drivers/md/dm-ebs-target.c
50
sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
drivers/md/dm-era-target.c
1217
static dm_block_t get_block(struct era *era, struct bio *bio)
drivers/md/dm-era-target.c
1219
sector_t block_nr = bio->bi_iter.bi_sector;
drivers/md/dm-era-target.c
1229
static void remap_to_origin(struct era *era, struct bio *bio)
drivers/md/dm-era-target.c
1231
bio_set_dev(bio, era->origin_dev->bdev);
drivers/md/dm-era-target.c
1265
struct bio *bio;
drivers/md/dm-era-target.c
1281
while ((bio = bio_list_pop(&deferred_bios))) {
drivers/md/dm-era-target.c
1283
get_block(era, bio));
drivers/md/dm-era-target.c
1293
bio_list_add(&marked_bios, bio);
drivers/md/dm-era-target.c
1303
while ((bio = bio_list_pop(&marked_bios)))
drivers/md/dm-era-target.c
1304
bio_io_error(bio);
drivers/md/dm-era-target.c
1307
while ((bio = bio_list_pop(&marked_bios))) {
drivers/md/dm-era-target.c
1313
set_bit(get_block(era, bio), ws->bits);
drivers/md/dm-era-target.c
1314
submit_bio_noacct(bio);
drivers/md/dm-era-target.c
1366
static void defer_bio(struct era *era, struct bio *bio)
drivers/md/dm-era-target.c
1369
bio_list_add(&era->deferred_bios, bio);
drivers/md/dm-era-target.c
1560
static int era_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-era-target.c
1563
dm_block_t block = get_block(era, bio);
drivers/md/dm-era-target.c
1570
remap_to_origin(era, bio);
drivers/md/dm-era-target.c
1575
if (!(bio->bi_opf & REQ_PREFLUSH) &&
drivers/md/dm-era-target.c
1576
(bio_data_dir(bio) == WRITE) &&
drivers/md/dm-era-target.c
1578
defer_bio(era, bio);
drivers/md/dm-flakey.c
21
#define all_corrupt_bio_flags_match(bio, fc) \
drivers/md/dm-flakey.c
22
(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
drivers/md/dm-flakey.c
352
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
drivers/md/dm-flakey.c
356
bio_set_dev(bio, fc->dev->bdev);
drivers/md/dm-flakey.c
357
bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
drivers/md/dm-flakey.c
360
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
drivers/md/dm-flakey.c
371
__bio_for_each_segment(bvec, bio, iter, start) {
drivers/md/dm-flakey.c
372
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
drivers/md/dm-flakey.c
378
bio, corrupt_bio_value, corrupt_bio_byte,
drivers/md/dm-flakey.c
379
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
drivers/md/dm-flakey.c
384
corrupt_bio_byte -= bio_iter_len(bio, iter);
drivers/md/dm-flakey.c
388
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
drivers/md/dm-flakey.c
393
corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
drivers/md/dm-flakey.c
396
static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
drivers/md/dm-flakey.c
404
corrupt_bio_common(bio, corrupt_byte, corrupt_value, start);
drivers/md/dm-flakey.c
407
static void clone_free(struct bio *clone)
drivers/md/dm-flakey.c
420
static void clone_endio(struct bio *clone)
drivers/md/dm-flakey.c
422
struct bio *bio = clone->bi_private;
drivers/md/dm-flakey.c
423
bio->bi_status = clone->bi_status;
drivers/md/dm-flakey.c
425
bio_endio(bio);
drivers/md/dm-flakey.c
428
static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct bio *bio)
drivers/md/dm-flakey.c
430
struct bio *clone;
drivers/md/dm-flakey.c
432
struct bvec_iter iter = bio->bi_iter;
drivers/md/dm-flakey.c
434
if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT))
drivers/md/dm-flakey.c
435
dm_accept_partial_bio(bio, UIO_MAXIOV << PAGE_SHIFT >> SECTOR_SHIFT);
drivers/md/dm-flakey.c
437
size = bio->bi_iter.bi_size;
drivers/md/dm-flakey.c
444
bio_init_inline(clone, fc->dev->bdev, nr_iovecs, bio->bi_opf);
drivers/md/dm-flakey.c
446
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
drivers/md/dm-flakey.c
447
clone->bi_private = bio;
drivers/md/dm-flakey.c
475
struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
drivers/md/dm-flakey.c
481
bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
drivers/md/dm-flakey.c
493
static int flakey_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-flakey.c
497
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
drivers/md/dm-flakey.c
501
if (op_is_zone_mgmt(bio_op(bio)))
drivers/md/dm-flakey.c
509
if (bio_has_data(bio)) {
drivers/md/dm-flakey.c
511
pb->saved_iter = bio->bi_iter;
drivers/md/dm-flakey.c
518
if (bio_data_dir(bio) == READ) {
drivers/md/dm-flakey.c
528
bio_endio(bio);
drivers/md/dm-flakey.c
531
bio_io_error(bio);
drivers/md/dm-flakey.c
543
if (all_corrupt_bio_flags_match(bio, fc))
drivers/md/dm-flakey.c
553
struct bio *clone = clone_bio(ti, fc, bio);
drivers/md/dm-flakey.c
568
flakey_map_bio(ti, bio);
drivers/md/dm-flakey.c
573
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
drivers/md/dm-flakey.c
577
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
drivers/md/dm-flakey.c
579
if (op_is_zone_mgmt(bio_op(bio)))
drivers/md/dm-flakey.c
582
if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
drivers/md/dm-flakey.c
585
all_corrupt_bio_flags_match(bio, fc)) {
drivers/md/dm-flakey.c
589
corrupt_bio_data(bio, fc, pb->saved_iter);
drivers/md/dm-flakey.c
596
corrupt_bio_random(bio, pb->saved_iter);
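
dm-flakey above, like dm-crypt and dm-integrity below, bounds the size of a bio inside ->map() with dm_accept_partial_bio(), which trims the bio to the accepted sector count and has DM core split off and resubmit the remainder. A sketch of that pattern; MY_MAX_SECTORS is an illustrative limit:

    /* Sketch: capping bio size in a DM ->map() hook.
     * dm_accept_partial_bio() keeps the first n sectors and lets DM
     * core resubmit the rest. MY_MAX_SECTORS is illustrative.
     */
    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    #define MY_MAX_SECTORS 128      /* 64 KiB in 512-byte sectors */

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
            if (bio_sectors(bio) > MY_MAX_SECTORS)
                    dm_accept_partial_bio(bio, MY_MAX_SECTORS);

            /* remap the (possibly trimmed) front as usual */
            bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
            return DM_MAPIO_REMAPPED;
    }
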
drivers/md/dm-integrity.c
1568
struct bio *bio;
drivers/md/dm-integrity.c
1572
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
1573
bio_list_add(&ic->flush_bio_list, bio);
drivers/md/dm-integrity.c
1579
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
drivers/md/dm-integrity.c
1584
if (unlikely(r) && !bio->bi_status)
drivers/md/dm-integrity.c
1585
bio->bi_status = errno_to_blk_status(r);
drivers/md/dm-integrity.c
1586
if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
drivers/md/dm-integrity.c
1590
bio_list_add(&ic->synchronous_bios, bio);
drivers/md/dm-integrity.c
1595
bio_endio(bio);
drivers/md/dm-integrity.c
1600
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
1602
if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
drivers/md/dm-integrity.c
1605
do_endio(ic, bio);
drivers/md/dm-integrity.c
1612
struct bio *bio;
drivers/md/dm-integrity.c
1619
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
1620
if (unlikely(dio->bi_status) && !bio->bi_status)
drivers/md/dm-integrity.c
1621
bio->bi_status = dio->bi_status;
drivers/md/dm-integrity.c
1622
if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
drivers/md/dm-integrity.c
1624
bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
drivers/md/dm-integrity.c
1633
static void integrity_end_io(struct bio *bio)
drivers/md/dm-integrity.c
1635
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
1637
dm_bio_restore(&dio->bio_details, bio);
drivers/md/dm-integrity.c
1638
if (bio->bi_integrity)
drivers/md/dm-integrity.c
1639
bio->bi_opf |= REQ_INTEGRITY;
drivers/md/dm-integrity.c
1800
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
1815
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
drivers/md/dm-integrity.c
1836
alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
drivers/md/dm-integrity.c
1855
bio->bi_bdev, logical_sector);
drivers/md/dm-integrity.c
1858
bio, logical_sector, 0);
drivers/md/dm-integrity.c
1889
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
1943
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
drivers/md/dm-integrity.c
2021
static inline bool dm_integrity_check_limits(struct dm_integrity_c *ic, sector_t logical_sector, struct bio *bio)
drivers/md/dm-integrity.c
2023
if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
drivers/md/dm-integrity.c
2025
logical_sector, bio_sectors(bio),
drivers/md/dm-integrity.c
2029
if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
drivers/md/dm-integrity.c
2032
logical_sector, bio_sectors(bio));
drivers/md/dm-integrity.c
2035
if (ic->sectors_per_block > 1 && likely(bio_op(bio) != REQ_OP_DISCARD)) {
drivers/md/dm-integrity.c
2039
bio_for_each_segment(bv, bio, iter) {
drivers/md/dm-integrity.c
2050
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-integrity.c
2053
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2060
dio->op = bio_op(bio);
drivers/md/dm-integrity.c
2064
bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
drivers/md/dm-integrity.c
2073
sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-integrity.c
2076
sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
drivers/md/dm-integrity.c
2081
dm_accept_partial_bio(bio, len);
drivers/md/dm-integrity.c
2086
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
drivers/md/dm-integrity.c
2091
dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-integrity.c
2092
dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
drivers/md/dm-integrity.c
2098
bio->bi_opf &= ~REQ_FUA;
drivers/md/dm-integrity.c
2100
if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio)))
drivers/md/dm-integrity.c
2103
bip = bio_integrity(bio);
drivers/md/dm-integrity.c
2106
unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
drivers/md/dm-integrity.c
2130
bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
drivers/md/dm-integrity.c
2136
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
drivers/md/dm-integrity.c
2146
struct bio_vec bv = bio_iovec(bio);
drivers/md/dm-integrity.c
2152
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
drivers/md/dm-integrity.c
2187
struct bio_integrity_payload *bip = bio_integrity(bio);
drivers/md/dm-integrity.c
2267
if (unlikely(bio->bi_iter.bi_size)) {
drivers/md/dm-integrity.c
2282
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2304
do_endio(ic, bio);
drivers/md/dm-integrity.c
2307
dio->range.n_sectors = bio_sectors(bio);
drivers/md/dm-integrity.c
2442
bio_list_add(&bbs->bio_queue, bio);
drivers/md/dm-integrity.c
2457
dm_bio_record(&dio->bio_details, bio);
drivers/md/dm-integrity.c
2458
bio_set_dev(bio, ic->dev->bdev);
drivers/md/dm-integrity.c
2459
bio->bi_integrity = NULL;
drivers/md/dm-integrity.c
2460
bio->bi_opf &= ~REQ_INTEGRITY;
drivers/md/dm-integrity.c
2461
bio->bi_end_io = integrity_end_io;
drivers/md/dm-integrity.c
2462
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
drivers/md/dm-integrity.c
2471
submit_bio_noacct(bio);
drivers/md/dm-integrity.c
2476
submit_bio_noacct(bio);
drivers/md/dm-integrity.c
2489
if (likely(!bio->bi_status))
drivers/md/dm-integrity.c
2502
if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
drivers/md/dm-integrity.c
2511
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2516
if (unlikely(bio_integrity(bio))) {
drivers/md/dm-integrity.c
2517
bio->bi_status = BLK_STS_NOTSUPP;
drivers/md/dm-integrity.c
2518
bio_endio(bio);
drivers/md/dm-integrity.c
2522
bio_set_dev(bio, ic->dev->bdev);
drivers/md/dm-integrity.c
2523
if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0))
drivers/md/dm-integrity.c
2529
dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
drivers/md/dm-integrity.c
2538
if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) {
drivers/md/dm-integrity.c
2539
bio->bi_status = BLK_STS_NOTSUPP;
drivers/md/dm-integrity.c
2540
bio_endio(bio);
drivers/md/dm-integrity.c
2543
dm_accept_partial_bio(bio, sectors);
drivers/md/dm-integrity.c
2549
dio->range.logical_sector = bio->bi_iter.bi_sector;
drivers/md/dm-integrity.c
2550
dio->range.n_sectors = bio_sectors(bio);
drivers/md/dm-integrity.c
2588
dio->bio_details.bi_iter = bio->bi_iter;
drivers/md/dm-integrity.c
2590
if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) {
drivers/md/dm-integrity.c
2594
bio->bi_iter.bi_sector += ic->start + SB_SECTORS;
drivers/md/dm-integrity.c
2596
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
drivers/md/dm-integrity.c
2598
bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
drivers/md/dm-integrity.c
2599
bio_endio(bio);
drivers/md/dm-integrity.c
2606
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
drivers/md/dm-integrity.c
2613
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
drivers/md/dm-integrity.c
2617
ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload),
drivers/md/dm-integrity.c
2620
bio->bi_status = BLK_STS_RESOURCE;
drivers/md/dm-integrity.c
2621
bio_endio(bio);
drivers/md/dm-integrity.c
2642
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2644
struct bio *outgoing_bio;
drivers/md/dm-integrity.c
2666
bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
drivers/md/dm-integrity.c
2667
bio_endio(bio);
drivers/md/dm-integrity.c
2674
bio->bi_status = BLK_STS_RESOURCE;
drivers/md/dm-integrity.c
2675
bio_endio(bio);
drivers/md/dm-integrity.c
2684
bio->bi_status = errno_to_blk_status(r);
drivers/md/dm-integrity.c
2685
bio_endio(bio);
drivers/md/dm-integrity.c
2696
bio, dio->bio_details.bi_iter.bi_sector, 0);
drivers/md/dm-integrity.c
2698
bio->bi_status = BLK_STS_PROTECTION;
drivers/md/dm-integrity.c
2699
bio_endio(bio);
drivers/md/dm-integrity.c
2703
bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
drivers/md/dm-integrity.c
2708
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
drivers/md/dm-integrity.c
2711
bio_endio(bio);
drivers/md/dm-integrity.c
2716
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2721
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
drivers/md/dm-integrity.c
2734
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
drivers/md/dm-integrity.c
2744
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2747
bio_endio(bio);
drivers/md/dm-integrity.c
2750
static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
drivers/md/dm-integrity.c
2753
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2784
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
2788
bio->bi_status = BLK_STS_IOERR;
drivers/md/dm-integrity.c
2791
submit_bio_noacct(bio);
drivers/md/dm-integrity.c
2829
struct bio *flushes;
drivers/md/dm-integrity.c
2889
struct bio *next = flushes->bi_next;
drivers/md/dm-integrity.c
3288
struct bio *bio;
drivers/md/dm-integrity.c
3342
bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios);
drivers/md/dm-integrity.c
3343
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
drivers/md/dm-integrity.c
3344
bio_add_virt_nofail(bio, recalc_buffer,
drivers/md/dm-integrity.c
3346
r = submit_bio_wait(bio);
drivers/md/dm-integrity.c
3347
bio_put(bio);
drivers/md/dm-integrity.c
3363
bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios);
drivers/md/dm-integrity.c
3364
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
drivers/md/dm-integrity.c
3365
bio_add_virt_nofail(bio, recalc_buffer,
drivers/md/dm-integrity.c
3368
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
drivers/md/dm-integrity.c
3370
bio_put(bio);
drivers/md/dm-integrity.c
3374
ret = bio_integrity_add_page(bio, virt_to_page(recalc_tags), t - recalc_tags, offset_in_page(recalc_tags));
drivers/md/dm-integrity.c
3376
bio_put(bio);
drivers/md/dm-integrity.c
3381
r = submit_bio_wait(bio);
drivers/md/dm-integrity.c
3382
bio_put(bio);
drivers/md/dm-integrity.c
3418
struct bio *bio;
drivers/md/dm-integrity.c
3429
while ((bio = bio_list_pop(&bio_queue))) {
drivers/md/dm-integrity.c
3432
dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
3442
bio_list_add(&waiting, bio);
drivers/md/dm-integrity.c
3453
while ((bio = bio_list_pop(&waiting))) {
drivers/md/dm-integrity.c
3454
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
drivers/md/dm-integrity.c
3472
struct bio *bio;
drivers/md/dm-integrity.c
3500
while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
drivers/md/dm-integrity.c
3501
bio_endio(bio);
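
The dm-integrity recalculation entries above issue synchronous reads and writes with submit_bio_wait(), which blocks until the bio completes and returns an errno. A condensed sketch of that synchronous pattern; read_sectors_sync and its parameters are illustrative:

    /* Sketch: synchronous I/O with submit_bio_wait(), after the
     * integrity recalculation path. read_sectors_sync is illustrative.
     */
    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int read_sectors_sync(struct block_device *bdev, sector_t sector,
                                 struct page *page, unsigned int len)
    {
            struct bio *bio;
            int ret;

            bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, len, 0);

            ret = submit_bio_wait(bio);     /* blocks until completion */
            bio_put(bio);
            return ret;
    }
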
drivers/md/dm-io-rewind.c
104
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
drivers/md/dm-io-rewind.c
110
static inline void dm_bio_rewind_iter(const struct bio *bio,
drivers/md/dm-io-rewind.c
116
if (bio_no_advance_iter(bio))
drivers/md/dm-io-rewind.c
119
dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
drivers/md/dm-io-rewind.c
132
static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
drivers/md/dm-io-rewind.c
134
if (bio_integrity(bio))
drivers/md/dm-io-rewind.c
135
dm_bio_integrity_rewind(bio, bytes);
drivers/md/dm-io-rewind.c
137
if (bio_has_crypt_ctx(bio))
drivers/md/dm-io-rewind.c
138
dm_bio_crypt_rewind(bio, bytes);
drivers/md/dm-io-rewind.c
140
dm_bio_rewind_iter(bio, &bio->bi_iter, bytes);
drivers/md/dm-io-rewind.c
145
struct bio *orig = io->orig_bio;
drivers/md/dm-io-rewind.c
146
struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig,
drivers/md/dm-io-rewind.c
56
static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
drivers/md/dm-io-rewind.c
58
struct bio_integrity_payload *bip = bio_integrity(bio);
drivers/md/dm-io-rewind.c
59
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
drivers/md/dm-io-rewind.c
68
static inline void dm_bio_integrity_rewind(struct bio *bio,
drivers/md/dm-io-rewind.c
94
static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
drivers/md/dm-io-rewind.c
96
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
drivers/md/dm-io.c
101
bio->bi_private = (void *)((unsigned long)io | region);
drivers/md/dm-io.c
104
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
drivers/md/dm-io.c
107
unsigned long val = (unsigned long)bio->bi_private;
drivers/md/dm-io.c
142
static void endio(struct bio *bio)
drivers/md/dm-io.c
148
if (bio->bi_status && bio_data_dir(bio) == READ)
drivers/md/dm-io.c
149
zero_fill_bio(bio);
drivers/md/dm-io.c
154
retrieve_io_and_region_from_bio(bio, &io, &region);
drivers/md/dm-io.c
156
error = bio->bi_status;
drivers/md/dm-io.c
157
bio_put(bio);
drivers/md/dm-io.c
238
static void bio_dp_init(struct dpages *dp, struct bio *bio)
drivers/md/dm-io.c
247
dp->context_ptr = bio->bi_io_vec;
drivers/md/dm-io.c
248
dp->context_bi = bio->bi_iter;
drivers/md/dm-io.c
310
struct bio *bio;
drivers/md/dm-io.c
353
bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
drivers/md/dm-io.c
355
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
drivers/md/dm-io.c
356
bio->bi_end_io = endio;
drivers/md/dm-io.c
357
bio->bi_ioprio = ioprio;
drivers/md/dm-io.c
358
store_io_and_region_in_bio(bio, io, region);
drivers/md/dm-io.c
362
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
drivers/md/dm-io.c
371
if (!bio_add_page(bio, page, len, offset))
drivers/md/dm-io.c
381
submit_bio(bio);
drivers/md/dm-io.c
478
bio_dp_init(dp, io_req->mem.ptr.bio);
drivers/md/dm-io.c
93
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
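
dm-io's store/retrieve pair above packs a small region index into the low bits of the io pointer stashed in bi_private, so endio() can recover both from one pointer-sized field; this only works because the io allocation is aligned such that those low bits are zero. A sketch of the tagging trick under that alignment assumption; REGION_BITS is illustrative:

    /* Sketch: tagging a region index into the low bits of bi_private,
     * as dm-io.c does. Requires the io allocation to be aligned so
     * the low REGION_BITS bits of its address are zero. REGION_BITS
     * is illustrative.
     */
    #include <linux/bio.h>

    #define REGION_BITS 2                   /* supports 4 regions */
    #define REGION_MASK ((1UL << REGION_BITS) - 1)

    struct io;                              /* opaque here */

    static void store_io_and_region(struct bio *bio, struct io *io,
                                    unsigned int region)
    {
            bio->bi_private = (void *)((unsigned long)io | region);
    }

    static void retrieve_io_and_region(struct bio *bio, struct io **io,
                                       unsigned int *region)
    {
            unsigned long val = (unsigned long)bio->bi_private;

            *io = (struct io *)(val & ~REGION_MASK);
            *region = val & REGION_MASK;
    }
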
drivers/md/dm-linear.c
89
int linear_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-linear.c
93
bio_set_dev(bio, lc->dev->bdev);
drivers/md/dm-linear.c
94
bio->bi_iter.bi_sector = linear_map_sector(ti, bio->bi_iter.bi_sector);
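
linear_map() above is the smallest complete ->map(): retarget the bio's device, shift the sector by the table offset, and return DM_MAPIO_REMAPPED so DM core submits it. A self-contained sketch of the same shape; struct my_ctx stands in for dm-linear's private context:

    /* Sketch of a linear-style ->map(). struct my_ctx stands in for
     * dm-linear's linear_c (backing device + start offset).
     */
    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    struct my_ctx {
            struct dm_dev *dev;
            sector_t start;
    };

    static int my_linear_map(struct dm_target *ti, struct bio *bio)
    {
            struct my_ctx *mc = ti->private;

            bio_set_dev(bio, mc->dev->bdev);
            bio->bi_iter.bi_sector =
                    mc->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

            return DM_MAPIO_REMAPPED;       /* DM core submits the bio */
    }
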
drivers/md/dm-log-writes.c
168
static void log_end_io(struct bio *bio)
drivers/md/dm-log-writes.c
170
struct log_writes_c *lc = bio->bi_private;
drivers/md/dm-log-writes.c
172
if (bio->bi_status) {
drivers/md/dm-log-writes.c
175
DMERR("Error writing log block, error=%d", bio->bi_status);
drivers/md/dm-log-writes.c
181
bio_free_pages(bio);
drivers/md/dm-log-writes.c
183
bio_put(bio);
drivers/md/dm-log-writes.c
186
static void log_end_super(struct bio *bio)
drivers/md/dm-log-writes.c
188
struct log_writes_c *lc = bio->bi_private;
drivers/md/dm-log-writes.c
191
log_end_io(bio);
drivers/md/dm-log-writes.c
216
struct bio *bio;
drivers/md/dm-log-writes.c
221
bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
drivers/md/dm-log-writes.c
222
bio->bi_iter.bi_size = 0;
drivers/md/dm-log-writes.c
223
bio->bi_iter.bi_sector = sector;
drivers/md/dm-log-writes.c
224
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
drivers/md/dm-log-writes.c
226
bio->bi_private = lc;
drivers/md/dm-log-writes.c
231
bio_put(bio);
drivers/md/dm-log-writes.c
243
ret = bio_add_page(bio, page, lc->sectorsize, 0);
drivers/md/dm-log-writes.c
248
submit_bio(bio);
drivers/md/dm-log-writes.c
251
bio_put(bio);
drivers/md/dm-log-writes.c
264
struct bio *bio;
drivers/md/dm-log-writes.c
273
bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
drivers/md/dm-log-writes.c
275
bio->bi_iter.bi_size = 0;
drivers/md/dm-log-writes.c
276
bio->bi_iter.bi_sector = sector;
drivers/md/dm-log-writes.c
277
bio->bi_end_io = log_end_io;
drivers/md/dm-log-writes.c
278
bio->bi_private = lc;
drivers/md/dm-log-writes.c
296
ret = bio_add_page(bio, page, pg_sectorlen, 0);
drivers/md/dm-log-writes.c
306
submit_bio(bio);
drivers/md/dm-log-writes.c
312
bio_free_pages(bio);
drivers/md/dm-log-writes.c
313
bio_put(bio);
drivers/md/dm-log-writes.c
321
struct bio *bio;
drivers/md/dm-log-writes.c
354
bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
drivers/md/dm-log-writes.c
356
bio->bi_iter.bi_size = 0;
drivers/md/dm-log-writes.c
357
bio->bi_iter.bi_sector = sector;
drivers/md/dm-log-writes.c
358
bio->bi_end_io = log_end_io;
drivers/md/dm-log-writes.c
359
bio->bi_private = lc;
drivers/md/dm-log-writes.c
366
ret = bio_add_page(bio, block->vecs[i].bv_page,
drivers/md/dm-log-writes.c
370
submit_bio(bio);
drivers/md/dm-log-writes.c
371
bio = bio_alloc(lc->logdev->bdev,
drivers/md/dm-log-writes.c
374
bio->bi_iter.bi_size = 0;
drivers/md/dm-log-writes.c
375
bio->bi_iter.bi_sector = sector;
drivers/md/dm-log-writes.c
376
bio->bi_end_io = log_end_io;
drivers/md/dm-log-writes.c
377
bio->bi_private = lc;
drivers/md/dm-log-writes.c
379
ret = bio_add_page(bio, block->vecs[i].bv_page,
drivers/md/dm-log-writes.c
383
bio_put(bio);
drivers/md/dm-log-writes.c
389
submit_bio(bio);
drivers/md/dm-log-writes.c
637
static void normal_map_bio(struct dm_target *ti, struct bio *bio)
drivers/md/dm-log-writes.c
641
bio_set_dev(bio, lc->dev->bdev);
drivers/md/dm-log-writes.c
644
static int log_writes_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-log-writes.c
647
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
drivers/md/dm-log-writes.c
653
bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
drivers/md/dm-log-writes.c
654
bool fua_bio = (bio->bi_opf & REQ_FUA);
drivers/md/dm-log-writes.c
655
bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
drivers/md/dm-log-writes.c
656
bool meta_bio = (bio->bi_opf & REQ_META);
drivers/md/dm-log-writes.c
667
if (bio_data_dir(bio) == READ)
drivers/md/dm-log-writes.c
671
if (!bio_sectors(bio) && !flush_bio)
drivers/md/dm-log-writes.c
681
alloc_size = struct_size(block, vecs, bio_segments(bio));
drivers/md/dm-log-writes.c
704
block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
drivers/md/dm-log-writes.c
705
block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
drivers/md/dm-log-writes.c
712
bio_endio(bio);
drivers/md/dm-log-writes.c
717
if (flush_bio && !bio_sectors(bio)) {
drivers/md/dm-log-writes.c
733
bio_for_each_segment(bv, bio, iter) {
drivers/md/dm-log-writes.c
763
normal_map_bio(ti, bio);
drivers/md/dm-log-writes.c
767
static int normal_end_io(struct dm_target *ti, struct bio *bio,
drivers/md/dm-log-writes.c
771
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
drivers/md/dm-log-writes.c
773
if (bio_data_dir(bio) == WRITE && pb->block) {
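
The dm-log-writes entries show the canonical allocate/fill/submit sequence for a driver-built bio: bio_alloc() against the log device, set bi_iter.bi_sector and the completion callback, attach pages with bio_add_page(), then submit_bio(). A condensed, hedged sketch; write_one_page and my_end_io are illustrative names:

    /* Sketch: building and submitting one write bio, after the
     * pattern in dm-log-writes.c. write_one_page/my_end_io are
     * illustrative.
     */
    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_end_io(struct bio *bio)
    {
            if (bio->bi_status)
                    pr_err("log write failed: %d\n", bio->bi_status);
            bio_free_pages(bio);    /* free the pages attached below */
            bio_put(bio);
    }

    static int write_one_page(struct block_device *bdev, struct page *page,
                              unsigned int len, sector_t sector)
    {
            struct bio *bio;

            bio = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
            bio->bi_iter.bi_sector = sector;
            bio->bi_end_io = my_end_io;

            if (bio_add_page(bio, page, len, 0) != len) {
                    bio_put(bio);
                    return -EIO;
            }

            submit_bio(bio);
            return 0;
    }
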
drivers/md/dm-mpath.c
1696
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
drivers/md/dm-mpath.c
2038
struct bio *bio;
drivers/md/dm-mpath.c
2050
bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
drivers/md/dm-mpath.c
2051
if (!bio) {
drivers/md/dm-mpath.c
2056
bio->bi_iter.bi_sector = 0;
drivers/md/dm-mpath.c
2057
__bio_add_page(bio, page, read_size, 0);
drivers/md/dm-mpath.c
2058
submit_bio_wait(bio);
drivers/md/dm-mpath.c
2059
status = bio->bi_status;
drivers/md/dm-mpath.c
2060
bio_put(bio);
drivers/md/dm-mpath.c
286
static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
drivers/md/dm-mpath.c
288
return dm_per_bio_data(bio, multipath_per_bio_data_size());
drivers/md/dm-mpath.c
298
static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
drivers/md/dm-mpath.c
300
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
drivers/md/dm-mpath.c
303
mpio->nr_bytes = bio->bi_iter.bi_size;
drivers/md/dm-mpath.c
308
dm_bio_record(bio_details, bio);
drivers/md/dm-mpath.c
558
clone->bio = clone->biotail = NULL;
drivers/md/dm-mpath.c
594
static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
drivers/md/dm-mpath.c
597
bio_list_add(&m->queued_bios, bio);
drivers/md/dm-mpath.c
602
static void multipath_queue_bio(struct multipath *m, struct bio *bio)
drivers/md/dm-mpath.c
607
__multipath_queue_bio(m, bio);
drivers/md/dm-mpath.c
611
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
drivers/md/dm-mpath.c
618
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
drivers/md/dm-mpath.c
623
__multipath_queue_bio(m, bio);
drivers/md/dm-mpath.c
630
multipath_queue_bio(m, bio);
drivers/md/dm-mpath.c
638
static int __multipath_map_bio(struct multipath *m, struct bio *bio,
drivers/md/dm-mpath.c
641
struct pgpath *pgpath = __map_bio(m, bio);
drivers/md/dm-mpath.c
658
bio->bi_status = 0;
drivers/md/dm-mpath.c
659
bio_set_dev(bio, pgpath->path.dev->bdev);
drivers/md/dm-mpath.c
660
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
drivers/md/dm-mpath.c
669
static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
drivers/md/dm-mpath.c
674
multipath_init_per_bio_data(bio, &mpio);
drivers/md/dm-mpath.c
675
return __multipath_map_bio(m, bio, mpio);
drivers/md/dm-mpath.c
689
struct bio *bio;
drivers/md/dm-mpath.c
709
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-mpath.c
710
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
drivers/md/dm-mpath.c
712
dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
drivers/md/dm-mpath.c
713
r = __multipath_map_bio(m, bio, mpio);
drivers/md/dm-mpath.c
716
bio->bi_status = BLK_STS_IOERR;
drivers/md/dm-mpath.c
717
bio_endio(bio);
drivers/md/dm-mpath.c
720
bio->bi_status = BLK_STS_DM_REQUEUE;
drivers/md/dm-mpath.c
721
bio_endio(bio);
drivers/md/dm-mpath.c
724
submit_bio_noacct(bio);
drivers/md/dm-pcache/backing_dev.c
128
static void backing_dev_bio_end(struct bio *bio)
drivers/md/dm-pcache/backing_dev.c
130
struct pcache_backing_dev_req *backing_req = bio->bi_private;
drivers/md/dm-pcache/backing_dev.c
134
backing_req->ret = blk_status_to_errno(bio->bi_status);
drivers/md/dm-pcache/backing_dev.c
156
submit_bio_noacct(&backing_req->bio);
drivers/md/dm-pcache/backing_dev.c
165
submit_bio_noacct(&backing_req->bio);
drivers/md/dm-pcache/backing_dev.c
175
static void bio_map(struct bio *bio, void *base, size_t size)
drivers/md/dm-pcache/backing_dev.c
185
BUG_ON(!bio_add_page(bio, page, size, offset));
drivers/md/dm-pcache/backing_dev.c
195
BUG_ON(!bio_add_page(bio, page, len, offset));
drivers/md/dm-pcache/backing_dev.c
206
struct bio *orig = pcache_req->bio;
drivers/md/dm-pcache/backing_dev.c
214
bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);
drivers/md/dm-pcache/backing_dev.c
271
struct bio *clone;
drivers/md/dm-pcache/backing_dev.c
275
clone = &backing_req->bio;
drivers/md/dm-pcache/backing_dev.c
296
struct bio *backing_bio;
drivers/md/dm-pcache/backing_dev.c
298
bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
drivers/md/dm-pcache/backing_dev.c
301
backing_bio = &backing_req->bio;
drivers/md/dm-pcache/backing_dev.h
20
struct bio bio;
drivers/md/dm-pcache/cache_req.c
114
ret = segment_copy_to_bio(segment, pos->seg_off, len, pcache_req->bio, bio_off);
drivers/md/dm-pcache/cache_req.c
774
ret = cache_key_append(cache, key, pcache_req->bio->bi_opf & REQ_FUA);
drivers/md/dm-pcache/cache_req.c
827
struct bio *bio = pcache_req->bio;
drivers/md/dm-pcache/cache_req.c
829
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
drivers/md/dm-pcache/cache_req.c
832
if (bio_data_dir(bio) == READ)
drivers/md/dm-pcache/cache_req.c
98
return segment_copy_from_bio(segment, pos->seg_off, key->len, pcache_req->bio, bio_off);
drivers/md/dm-pcache/dm_pcache.c
368
static int dm_pcache_map_bio(struct dm_target *ti, struct bio *bio)
drivers/md/dm-pcache/dm_pcache.c
370
struct pcache_request *pcache_req = dm_per_bio_data(bio, sizeof(struct pcache_request));
drivers/md/dm-pcache/dm_pcache.c
377
pcache_req->bio = bio;
drivers/md/dm-pcache/dm_pcache.c
378
pcache_req->off = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
drivers/md/dm-pcache/dm_pcache.c
379
pcache_req->data_len = bio->bi_iter.bi_size;
drivers/md/dm-pcache/dm_pcache.c
70
struct bio *bio = pcache_req->bio;
drivers/md/dm-pcache/dm_pcache.c
77
bio->bi_status = errno_to_blk_status(ret);
drivers/md/dm-pcache/dm_pcache.c
78
bio_endio(bio);
drivers/md/dm-pcache/dm_pcache.h
51
struct bio *bio;
drivers/md/dm-pcache/segment.c
15
iov_iter_bvec(&iter, ITER_DEST, &bio->bi_io_vec[bio->bi_iter.bi_idx],
drivers/md/dm-pcache/segment.c
16
bio_segments(bio), bio->bi_iter.bi_size);
drivers/md/dm-pcache/segment.c
17
iter.iov_offset = bio->bi_iter.bi_bvec_done;
drivers/md/dm-pcache/segment.c
30
u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
drivers/md/dm-pcache/segment.c
36
iov_iter_bvec(&iter, ITER_SOURCE, &bio->bi_io_vec[bio->bi_iter.bi_idx],
drivers/md/dm-pcache/segment.c
37
bio_segments(bio), bio->bi_iter.bi_size);
drivers/md/dm-pcache/segment.c
38
iter.iov_offset = bio->bi_iter.bi_bvec_done;
drivers/md/dm-pcache/segment.c
9
u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
drivers/md/dm-pcache/segment.h
61
u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
drivers/md/dm-pcache/segment.h
63
u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
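
The segment.c lines show how dm-pcache builds an iov_iter over only the not-yet-consumed part of a bio: bi_iter.bi_idx selects the current bvec and bi_iter.bi_bvec_done carries the byte offset into it. A sketch of the copy-into-bio direction, under the assumption that a plain copy_to_iter() suits the caller (copy_buf_to_bio is a hypothetical name):

#include <linux/bio.h>
#include <linux/uio.h>

/* Copy @len bytes from @src into the unprocessed portion of @bio. */
static int copy_buf_to_bio(const void *src, size_t len, struct bio *bio)
{
	struct iov_iter iter;

	iov_iter_bvec(&iter, ITER_DEST, &bio->bi_io_vec[bio->bi_iter.bi_idx],
		      bio_segments(bio), bio->bi_iter.bi_size);
	iter.iov_offset = bio->bi_iter.bi_bvec_done;	/* resume mid-bvec */

	return copy_to_iter(src, len, &iter) == len ? 0 : -EIO;
}
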
drivers/md/dm-raid.c
3341
static int raid_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-raid.c
3354
if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
drivers/md/dm-raid.c
3357
if (unlikely(!md_handle_request(mddev, bio)))
drivers/md/dm-raid1.c
1196
static int mirror_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-raid1.c
1198
int r, rw = bio_data_dir(bio);
drivers/md/dm-raid1.c
1203
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
drivers/md/dm-raid1.c
1209
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
drivers/md/dm-raid1.c
1210
queue_bio(ms, bio, rw);
drivers/md/dm-raid1.c
1214
r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
drivers/md/dm-raid1.c
1222
if (bio->bi_opf & REQ_RAHEAD)
drivers/md/dm-raid1.c
1225
queue_bio(ms, bio, rw);
drivers/md/dm-raid1.c
1233
m = choose_mirror(ms, bio->bi_iter.bi_sector);
drivers/md/dm-raid1.c
1237
dm_bio_record(&bio_record->details, bio);
drivers/md/dm-raid1.c
1240
map_bio(m, bio);
drivers/md/dm-raid1.c
1245
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
drivers/md/dm-raid1.c
1248
int rw = bio_data_dir(bio);
drivers/md/dm-raid1.c
1253
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
drivers/md/dm-raid1.c
1259
if (!(bio->bi_opf & REQ_PREFLUSH) &&
drivers/md/dm-raid1.c
126
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
drivers/md/dm-raid1.c
1260
bio_op(bio) != REQ_OP_DISCARD)
drivers/md/dm-raid1.c
1268
if (bio->bi_opf & REQ_RAHEAD)
drivers/md/dm-raid1.c
1293
if (default_ok(m) || mirror_available(ms, bio)) {
drivers/md/dm-raid1.c
1296
dm_bio_restore(bd, bio);
drivers/md/dm-raid1.c
1298
bio->bi_status = 0;
drivers/md/dm-raid1.c
1300
queue_bio(ms, bio, rw);
drivers/md/dm-raid1.c
1318
struct bio *bio;
drivers/md/dm-raid1.c
1333
while ((bio = bio_list_pop(&holds)))
drivers/md/dm-raid1.c
1334
hold_bio(ms, bio);
drivers/md/dm-raid1.c
135
bio_list_add(bl, bio);
drivers/md/dm-raid1.c
144
struct bio *bio;
drivers/md/dm-raid1.c
146
while ((bio = bio_list_pop(bio_list)))
drivers/md/dm-raid1.c
147
queue_bio(ms, bio, WRITE);
drivers/md/dm-raid1.c
167
static struct mirror *bio_get_m(struct bio *bio)
drivers/md/dm-raid1.c
169
return (struct mirror *) bio->bi_next;
drivers/md/dm-raid1.c
172
static void bio_set_m(struct bio *bio, struct mirror *m)
drivers/md/dm-raid1.c
174
bio->bi_next = (struct bio *) m;
drivers/md/dm-raid1.c
444
static int mirror_available(struct mirror_set *ms, struct bio *bio)
drivers/md/dm-raid1.c
447
region_t region = dm_rh_bio_to_region(ms->rh, bio);
drivers/md/dm-raid1.c
450
return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
drivers/md/dm-raid1.c
458
static sector_t map_sector(struct mirror *m, struct bio *bio)
drivers/md/dm-raid1.c
460
if (unlikely(!bio->bi_iter.bi_size))
drivers/md/dm-raid1.c
462
return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
drivers/md/dm-raid1.c
465
static void map_bio(struct mirror *m, struct bio *bio)
drivers/md/dm-raid1.c
467
bio_set_dev(bio, m->dev->bdev);
drivers/md/dm-raid1.c
468
bio->bi_iter.bi_sector = map_sector(m, bio);
drivers/md/dm-raid1.c
472
struct bio *bio)
drivers/md/dm-raid1.c
475
io->sector = map_sector(m, bio);
drivers/md/dm-raid1.c
476
io->count = bio_sectors(bio);
drivers/md/dm-raid1.c
479
static void hold_bio(struct mirror_set *ms, struct bio *bio)
drivers/md/dm-raid1.c
494
bio->bi_status = BLK_STS_DM_REQUEUE;
drivers/md/dm-raid1.c
496
bio->bi_status = BLK_STS_IOERR;
drivers/md/dm-raid1.c
498
bio_endio(bio);
drivers/md/dm-raid1.c
505
bio_list_add(&ms->holds, bio);
drivers/md/dm-raid1.c
516
struct bio *bio = context;
drivers/md/dm-raid1.c
519
m = bio_get_m(bio);
drivers/md/dm-raid1.c
520
bio_set_m(bio, NULL);
drivers/md/dm-raid1.c
523
bio_endio(bio);
drivers/md/dm-raid1.c
529
if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
drivers/md/dm-raid1.c
532
queue_bio(m->ms, bio, bio_data_dir(bio));
drivers/md/dm-raid1.c
538
bio_io_error(bio);
drivers/md/dm-raid1.c
542
static void read_async_bio(struct mirror *m, struct bio *bio)
drivers/md/dm-raid1.c
548
.mem.ptr.bio = bio,
drivers/md/dm-raid1.c
550
.notify.context = bio,
drivers/md/dm-raid1.c
554
map_region(&io, m, bio);
drivers/md/dm-raid1.c
555
bio_set_m(bio, m);
drivers/md/dm-raid1.c
569
struct bio *bio;
drivers/md/dm-raid1.c
572
while ((bio = bio_list_pop(reads))) {
drivers/md/dm-raid1.c
573
region = dm_rh_bio_to_region(ms->rh, bio);
drivers/md/dm-raid1.c
580
m = choose_mirror(ms, bio->bi_iter.bi_sector);
drivers/md/dm-raid1.c
585
read_async_bio(m, bio);
drivers/md/dm-raid1.c
587
bio_io_error(bio);
drivers/md/dm-raid1.c
606
struct bio *bio = context;
drivers/md/dm-raid1.c
611
ms = bio_get_m(bio)->ms;
drivers/md/dm-raid1.c
612
bio_set_m(bio, NULL);
drivers/md/dm-raid1.c
621
bio_endio(bio);
drivers/md/dm-raid1.c
629
if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-raid1.c
630
bio->bi_status = BLK_STS_NOTSUPP;
drivers/md/dm-raid1.c
631
bio_endio(bio);
drivers/md/dm-raid1.c
647
bio_list_add(&ms->failures, bio);
drivers/md/dm-raid1.c
653
static void do_write(struct mirror_set *ms, struct bio *bio)
drivers/md/dm-raid1.c
658
blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH | REQ_ATOMIC);
drivers/md/dm-raid1.c
662
.mem.ptr.bio = bio,
drivers/md/dm-raid1.c
664
.notify.context = bio,
drivers/md/dm-raid1.c
668
if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-raid1.c
675
map_region(dest++, m, bio);
drivers/md/dm-raid1.c
681
bio_set_m(bio, get_default_mirror(ms));
drivers/md/dm-raid1.c
689
struct bio *bio;
drivers/md/dm-raid1.c
706
while ((bio = bio_list_pop(writes))) {
drivers/md/dm-raid1.c
707
if ((bio->bi_opf & REQ_PREFLUSH) ||
drivers/md/dm-raid1.c
708
(bio_op(bio) == REQ_OP_DISCARD)) {
drivers/md/dm-raid1.c
709
bio_list_add(&sync, bio);
drivers/md/dm-raid1.c
713
region = dm_rh_bio_to_region(ms->rh, bio);
drivers/md/dm-raid1.c
717
bio_list_add(&requeue, bio);
drivers/md/dm-raid1.c
737
bio_list_add(this_list, bio);
drivers/md/dm-raid1.c
775
while ((bio = bio_list_pop(&sync)))
drivers/md/dm-raid1.c
776
do_write(ms, bio);
drivers/md/dm-raid1.c
778
while ((bio = bio_list_pop(&recover)))
drivers/md/dm-raid1.c
779
dm_rh_delay(ms->rh, bio);
drivers/md/dm-raid1.c
781
while ((bio = bio_list_pop(&nosync))) {
drivers/md/dm-raid1.c
784
bio_list_add(&ms->failures, bio);
drivers/md/dm-raid1.c
788
map_bio(get_default_mirror(ms), bio);
drivers/md/dm-raid1.c
789
submit_bio_noacct(bio);
drivers/md/dm-raid1.c
796
struct bio *bio;
drivers/md/dm-raid1.c
818
while ((bio = bio_list_pop(failures))) {
drivers/md/dm-raid1.c
821
dm_rh_mark_nosync(ms->rh, bio);
drivers/md/dm-raid1.c
838
bio_io_error(bio);
drivers/md/dm-raid1.c
840
hold_bio(ms, bio);
drivers/md/dm-raid1.c
842
bio_endio(bio);
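
Two dm-raid1 idioms above are worth calling out. bio_set_m()/bio_get_m() repurpose bi_next to carry the chosen mirror while the bio is held by dm-io, which is safe only because the bio sits on no list during that window. And hold_bio() finishes a stalled bio in one of three ways; a reduced sketch of that completion choice (finish_held_bio and its flags are illustrative, not the dm-raid1 code itself):

#include <linux/bio.h>

/* Complete a previously held bio: requeue it through the DM core,
 * fail it, or let it succeed. */
static void finish_held_bio(struct bio *bio, bool requeue_ok, bool failed)
{
	if (requeue_ok)
		bio->bi_status = BLK_STS_DM_REQUEUE;	/* DM core retries */
	else if (failed)
		bio->bi_status = BLK_STS_IOERR;
	else
		bio->bi_status = BLK_STS_OK;
	bio_endio(bio);
}
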
drivers/md/dm-region-hash.c
131
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
drivers/md/dm-region-hash.c
133
return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
drivers/md/dm-region-hash.c
398
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
drivers/md/dm-region-hash.c
403
region_t region = dm_rh_bio_to_region(rh, bio);
drivers/md/dm-region-hash.c
406
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/dm-region-hash.c
411
if (bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-region-hash.c
531
struct bio *bio;
drivers/md/dm-region-hash.c
533
for (bio = bios->head; bio; bio = bio->bi_next) {
drivers/md/dm-region-hash.c
534
if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-region-hash.c
536
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
drivers/md/dm-region-hash.c
693
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
drivers/md/dm-region-hash.c
698
reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
drivers/md/dm-region-hash.c
699
bio_list_add(&reg->delayed_bios, bio);
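
dm_rh_bio_to_region() above is pure arithmetic: rebase the bio's sector at the target start, then divide by the region size. With a power-of-two region size the division is a shift; region_shift below stands in for the field the region hash keeps internally:

#include <linux/types.h>

/* Which region does @sector fall in, for a target starting at
 * @target_begin? (Sketch; assumes a power-of-two region size.) */
static sector_t sector_to_region(sector_t target_begin,
				 unsigned int region_shift, sector_t sector)
{
	return (sector - target_begin) >> region_shift;
}
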
drivers/md/dm-rq.c
119
tio->clone->bio = NULL;
drivers/md/dm-rq.c
313
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
drivers/md/dm-rq.c
318
container_of(bio, struct dm_rq_clone_bio_info, clone);
drivers/md/dm-rq.c
322
bio->bi_end_io = end_clone_bio;
drivers/md/dm-rq.c
78
static void end_clone_bio(struct bio *clone)
drivers/md/dm-rq.h
29
struct bio *orig;
drivers/md/dm-rq.h
31
struct bio clone;
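
dm-rq.h embeds the clone bio at the tail of its per-clone info struct, which is what lets dm_rq_bio_constructor() and the endio recover the struct with container_of(). A compressed sketch under that layout assumption (clone_bio_info, end_clone and clone_bio_ctor are illustrative names; the real endio feeds the byte count into blk_update_request(), elided here):

#include <linux/bio.h>
#include <linux/container_of.h>

struct clone_bio_info {
	struct bio *orig;	/* bio from the original request */
	struct bio clone;	/* must stay last: inline bvecs follow */
};

static void end_clone(struct bio *clone)
{
	struct clone_bio_info *info =
		container_of(clone, struct clone_bio_info, clone);
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);
	(void)nr_bytes;		/* would be passed to blk_update_request() */
}

static int clone_bio_ctor(struct bio *bio, struct bio *bio_orig, void *data)
{
	struct clone_bio_info *info =
		container_of(bio, struct clone_bio_info, clone);

	info->orig = bio_orig;
	bio->bi_end_io = end_clone;
	return 0;
}
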
drivers/md/dm-snap.c
1117
static void error_bios(struct bio *bio);
drivers/md/dm-snap.c
1122
struct bio *b = NULL;
drivers/md/dm-snap.c
1562
static void flush_bios(struct bio *bio)
drivers/md/dm-snap.c
1564
struct bio *n;
drivers/md/dm-snap.c
1566
while (bio) {
drivers/md/dm-snap.c
1567
n = bio->bi_next;
drivers/md/dm-snap.c
1568
bio->bi_next = NULL;
drivers/md/dm-snap.c
1569
submit_bio_noacct(bio);
drivers/md/dm-snap.c
1570
bio = n;
drivers/md/dm-snap.c
1574
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
drivers/md/dm-snap.c
1579
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
drivers/md/dm-snap.c
1581
struct bio *n;
drivers/md/dm-snap.c
1584
while (bio) {
drivers/md/dm-snap.c
1585
n = bio->bi_next;
drivers/md/dm-snap.c
1586
bio->bi_next = NULL;
drivers/md/dm-snap.c
1587
r = do_origin(s->origin, bio, false);
drivers/md/dm-snap.c
1589
submit_bio_noacct(bio);
drivers/md/dm-snap.c
1590
bio = n;
drivers/md/dm-snap.c
1597
static void error_bios(struct bio *bio)
drivers/md/dm-snap.c
1599
struct bio *n;
drivers/md/dm-snap.c
1601
while (bio) {
drivers/md/dm-snap.c
1602
n = bio->bi_next;
drivers/md/dm-snap.c
1603
bio->bi_next = NULL;
drivers/md/dm-snap.c
1604
bio_io_error(bio);
drivers/md/dm-snap.c
1605
bio = n;
drivers/md/dm-snap.c
1639
struct bio *origin_bios = NULL;
drivers/md/dm-snap.c
1640
struct bio *snapshot_bios = NULL;
drivers/md/dm-snap.c
1641
struct bio *full_bio = NULL;
drivers/md/dm-snap.c
1807
static void full_bio_end_io(struct bio *bio)
drivers/md/dm-snap.c
1809
void *callback_data = bio->bi_private;
drivers/md/dm-snap.c
1811
dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
drivers/md/dm-snap.c
1815
struct bio *bio)
drivers/md/dm-snap.c
1820
pe->full_bio = bio;
drivers/md/dm-snap.c
1821
pe->full_bio_end_io = bio->bi_end_io;
drivers/md/dm-snap.c
1827
bio->bi_end_io = full_bio_end_io;
drivers/md/dm-snap.c
1828
bio->bi_private = callback_data;
drivers/md/dm-snap.c
1830
submit_bio_noacct(bio);
drivers/md/dm-snap.c
1899
struct bio *bio, chunk_t chunk)
drivers/md/dm-snap.c
1901
bio_set_dev(bio, s->cow->bdev);
drivers/md/dm-snap.c
1902
bio->bi_iter.bi_sector =
drivers/md/dm-snap.c
1905
(bio->bi_iter.bi_sector & s->store->chunk_mask);
drivers/md/dm-snap.c
1910
struct bio *bio = context;
drivers/md/dm-snap.c
1911
struct dm_snapshot *s = bio->bi_private;
drivers/md/dm-snap.c
1914
bio->bi_status = write_err ? BLK_STS_IOERR : 0;
drivers/md/dm-snap.c
1915
bio_endio(bio);
drivers/md/dm-snap.c
1919
struct bio *bio, chunk_t chunk)
drivers/md/dm-snap.c
1924
dest.sector = bio->bi_iter.bi_sector;
drivers/md/dm-snap.c
1928
WARN_ON_ONCE(bio->bi_private);
drivers/md/dm-snap.c
1929
bio->bi_private = s;
drivers/md/dm-snap.c
1930
dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
drivers/md/dm-snap.c
1933
static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
drivers/md/dm-snap.c
1935
return bio->bi_iter.bi_size ==
drivers/md/dm-snap.c
1939
static int snapshot_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-snap.c
1948
init_tracked_chunk(bio);
drivers/md/dm-snap.c
1950
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/dm-snap.c
1951
bio_set_dev(bio, s->cow->bdev);
drivers/md/dm-snap.c
1955
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
drivers/md/dm-snap.c
1963
if (bio_data_dir(bio) == WRITE) {
drivers/md/dm-snap.c
1972
bio_data_dir(bio) == WRITE)) {
drivers/md/dm-snap.c
1977
if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
drivers/md/dm-snap.c
1978
if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
drivers/md/dm-snap.c
1985
bio_set_dev(bio, s->origin->bdev);
drivers/md/dm-snap.c
1986
track_chunk(s, bio, chunk);
drivers/md/dm-snap.c
1995
remap_exception(s, e, bio, chunk);
drivers/md/dm-snap.c
1996
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
drivers/md/dm-snap.c
1997
io_overlaps_chunk(s, bio)) {
drivers/md/dm-snap.c
2000
zero_exception(s, e, bio, chunk);
drivers/md/dm-snap.c
2007
if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
drivers/md/dm-snap.c
2012
bio_endio(bio);
drivers/md/dm-snap.c
2022
if (bio_data_dir(bio) == WRITE) {
drivers/md/dm-snap.c
2032
remap_exception(s, e, bio, chunk);
drivers/md/dm-snap.c
2057
remap_exception(s, &pe->e, bio, chunk);
drivers/md/dm-snap.c
2061
if (!pe->started && io_overlaps_chunk(s, bio)) {
drivers/md/dm-snap.c
2067
start_full_bio(pe, bio);
drivers/md/dm-snap.c
2071
bio_list_add(&pe->snapshot_bios, bio);
drivers/md/dm-snap.c
2084
bio_set_dev(bio, s->origin->bdev);
drivers/md/dm-snap.c
2085
track_chunk(s, bio, chunk);
drivers/md/dm-snap.c
2107
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-snap.c
2114
init_tracked_chunk(bio);
drivers/md/dm-snap.c
2116
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/dm-snap.c
2117
if (!dm_bio_get_target_bio_nr(bio))
drivers/md/dm-snap.c
2118
bio_set_dev(bio, s->origin->bdev);
drivers/md/dm-snap.c
2120
bio_set_dev(bio, s->cow->bdev);
drivers/md/dm-snap.c
2124
if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
drivers/md/dm-snap.c
2126
bio_endio(bio);
drivers/md/dm-snap.c
2130
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
drivers/md/dm-snap.c
2142
if (bio_data_dir(bio) == WRITE &&
drivers/md/dm-snap.c
2146
bio_set_dev(bio, s->origin->bdev);
drivers/md/dm-snap.c
2147
bio_list_add(&s->bios_queued_during_merge, bio);
drivers/md/dm-snap.c
2152
remap_exception(s, e, bio, chunk);
drivers/md/dm-snap.c
2154
if (bio_data_dir(bio) == WRITE)
drivers/md/dm-snap.c
2155
track_chunk(s, bio, chunk);
drivers/md/dm-snap.c
2160
bio_set_dev(bio, s->origin->bdev);
drivers/md/dm-snap.c
2162
if (bio_data_dir(bio) == WRITE) {
drivers/md/dm-snap.c
2164
return do_origin(s->origin, bio, false);
drivers/md/dm-snap.c
2173
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
drivers/md/dm-snap.c
2178
if (is_bio_tracked(bio))
drivers/md/dm-snap.c
2179
stop_tracking_chunk(s, bio);
drivers/md/dm-snap.c
234
struct bio *full_bio;
drivers/md/dm-snap.c
2426
struct bio *bio)
drivers/md/dm-snap.c
250
static void init_tracked_chunk(struct bio *bio)
drivers/md/dm-snap.c
2509
if (bio) {
drivers/md/dm-snap.c
2510
bio_list_add(&pe->origin_bios, bio);
drivers/md/dm-snap.c
2511
bio = NULL;
drivers/md/dm-snap.c
252
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
drivers/md/dm-snap.c
2547
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
drivers/md/dm-snap.c
2564
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
drivers/md/dm-snap.c
257
static bool is_bio_tracked(struct bio *bio)
drivers/md/dm-snap.c
259
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
drivers/md/dm-snap.c
264
static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
drivers/md/dm-snap.c
2658
static int origin_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-snap.c
266
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
drivers/md/dm-snap.c
2663
bio_set_dev(bio, o->dev->bdev);
drivers/md/dm-snap.c
2665
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
drivers/md/dm-snap.c
2668
if (bio_data_dir(bio) != WRITE)
drivers/md/dm-snap.c
2672
((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
drivers/md/dm-snap.c
2674
if (bio_sectors(bio) > available_sectors)
drivers/md/dm-snap.c
2675
dm_accept_partial_bio(bio, available_sectors);
drivers/md/dm-snap.c
2678
return do_origin(o->dev, bio, true);
drivers/md/dm-snap.c
276
static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
drivers/md/dm-snap.c
278
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
drivers/md/dm-snap.c
919
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
drivers/md/dm-snap.c
977
static void flush_bios(struct bio *bio);
drivers/md/dm-snap.c
981
struct bio *b = NULL;
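
flush_bios(), retry_origin_bios() and error_bios() above all walk a hand-rolled bi_next chain rather than a struct bio_list; the invariant is that each bio must be unlinked before it is submitted or completed, because the block layer owns bi_next from that point on:

#include <linux/bio.h>

/* Submit every bio on a bi_next-linked chain (the dm-snap pattern). */
static void submit_bio_chain(struct bio *bio)
{
	struct bio *next;

	while (bio) {
		next = bio->bi_next;
		bio->bi_next = NULL;	/* detach before handing off */
		submit_bio_noacct(bio);
		bio = next;
	}
}
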
drivers/md/dm-stripe.c
250
static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
drivers/md/dm-stripe.c
255
stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
drivers/md/dm-stripe.c
257
stripe_map_range_sector(sc, bio_end_sector(bio),
drivers/md/dm-stripe.c
260
bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
drivers/md/dm-stripe.c
261
bio->bi_iter.bi_sector = begin +
drivers/md/dm-stripe.c
263
bio->bi_iter.bi_size = to_bytes(end - begin);
drivers/md/dm-stripe.c
268
bio_endio(bio);
drivers/md/dm-stripe.c
272
int stripe_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-stripe.c
278
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/dm-stripe.c
279
target_bio_nr = dm_bio_get_target_bio_nr(bio);
drivers/md/dm-stripe.c
281
bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
drivers/md/dm-stripe.c
284
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
drivers/md/dm-stripe.c
285
unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
drivers/md/dm-stripe.c
286
unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {
drivers/md/dm-stripe.c
287
target_bio_nr = dm_bio_get_target_bio_nr(bio);
drivers/md/dm-stripe.c
289
return stripe_map_range(sc, bio, target_bio_nr);
drivers/md/dm-stripe.c
292
stripe_map_sector(sc, bio->bi_iter.bi_sector,
drivers/md/dm-stripe.c
293
&stripe, &bio->bi_iter.bi_sector);
drivers/md/dm-stripe.c
295
bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
drivers/md/dm-stripe.c
296
bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
drivers/md/dm-stripe.c
404
static int stripe_end_io(struct dm_target *ti, struct bio *bio,
drivers/md/dm-stripe.c
414
if (bio->bi_opf & REQ_RAHEAD)
drivers/md/dm-stripe.c
420
format_dev_t(major_minor, bio_dev(bio));
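
The stripe_map() lines reduce to round-robin chunk arithmetic once the flush and discard special cases are out of the way. A sketch of the sector math, assuming a power-of-two chunk size and using sector_div(), whose in-place quotient and returned remainder give the within-stripe chunk and the stripe index respectively (names are illustrative):

#include <linux/kernel.h>
#include <linux/math64.h>

/* Which stripe does @sector land on, and at what sector within it? */
static void map_stripe_sector(unsigned int stripes, unsigned int chunk_shift,
			      sector_t sector, unsigned int *stripe,
			      sector_t *mapped)
{
	sector_t chunk = sector >> chunk_shift;
	sector_t in_chunk = sector & (((sector_t)1 << chunk_shift) - 1);

	*stripe = sector_div(chunk, stripes);	/* chunk becomes quotient */
	*mapped = (chunk << chunk_shift) + in_chunk;
}
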
drivers/md/dm-switch.c
320
static int switch_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-switch.c
323
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-switch.c
326
bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
drivers/md/dm-switch.c
327
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
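
switch_map() above is the simplest remap in this whole set: compute the offset into the target, look up the path, rebase the sector. The same two assignments recur in dm-stripe, dm-unstripe and dm-raid1's map_bio(); factored out as a sketch:

#include <linux/bio.h>
#include <linux/device-mapper.h>

/* Redirect @bio to @bdev, rebasing it at @dev_start. */
static void remap_to_path(struct dm_target *ti, struct bio *bio,
			  struct block_device *bdev, sector_t dev_start)
{
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = dev_start + offset;
}
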
drivers/md/dm-target.c
194
static int io_err_map(struct dm_target *tt, struct bio *bio)
drivers/md/dm-thin.c
1022
if (bio) {
drivers/md/dm-thin.c
1024
complete_overwrite_bio(tc, bio);
drivers/md/dm-thin.c
1049
bio_io_error(m->bio);
drivers/md/dm-thin.c
1055
bio_endio(m->bio);
drivers/md/dm-thin.c
1067
bio_io_error(m->bio);
drivers/md/dm-thin.c
1069
bio_endio(m->bio);
drivers/md/dm-thin.c
1078
struct bio *discard_parent)
drivers/md/dm-thin.c
1135
static void passdown_endio(struct bio *bio)
drivers/md/dm-thin.c
1141
queue_passdown_pt2(bio->bi_private);
drivers/md/dm-thin.c
1142
bio_put(bio);
drivers/md/dm-thin.c
1150
struct bio *discard_parent;
drivers/md/dm-thin.c
1161
bio_io_error(m->bio);
drivers/md/dm-thin.c
1174
bio_io_error(m->bio);
drivers/md/dm-thin.c
1208
bio_io_error(m->bio);
drivers/md/dm-thin.c
1210
bio_endio(m->bio);
drivers/md/dm-thin.c
1234
static int io_overlaps_block(struct pool *pool, struct bio *bio)
drivers/md/dm-thin.c
1236
return bio->bi_iter.bi_size ==
drivers/md/dm-thin.c
1240
static int io_overwrites_block(struct pool *pool, struct bio *bio)
drivers/md/dm-thin.c
1242
return (bio_data_dir(bio) == WRITE) &&
drivers/md/dm-thin.c
1243
io_overlaps_block(pool, bio);
drivers/md/dm-thin.c
1246
static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
drivers/md/dm-thin.c
1249
*save = bio->bi_end_io;
drivers/md/dm-thin.c
1250
bio->bi_end_io = fn;
drivers/md/dm-thin.c
1271
m->bio = NULL;
drivers/md/dm-thin.c
1290
static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
drivers/md/dm-thin.c
1295
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
1298
m->bio = bio;
drivers/md/dm-thin.c
1299
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
drivers/md/dm-thin.c
1300
inc_all_io_entry(pool, bio);
drivers/md/dm-thin.c
1301
remap_and_issue(tc, bio, data_begin);
drivers/md/dm-thin.c
1310
struct dm_bio_prison_cell *cell, struct bio *bio,
drivers/md/dm-thin.c
1338
if (io_overwrites_block(pool, bio))
drivers/md/dm-thin.c
1339
remap_and_issue_overwrite(tc, bio, data_dest, m);
drivers/md/dm-thin.c
1370
struct dm_bio_prison_cell *cell, struct bio *bio)
drivers/md/dm-thin.c
1373
data_origin, data_dest, cell, bio,
drivers/md/dm-thin.c
1379
struct bio *bio)
drivers/md/dm-thin.c
1397
if (io_overwrites_block(pool, bio))
drivers/md/dm-thin.c
1398
remap_and_issue_overwrite(tc, bio, data_block, m);
drivers/md/dm-thin.c
1409
struct dm_bio_prison_cell *cell, struct bio *bio)
drivers/md/dm-thin.c
1417
virt_block, data_dest, cell, bio,
drivers/md/dm-thin.c
1422
virt_block, data_dest, cell, bio,
drivers/md/dm-thin.c
1426
schedule_zero(tc, virt_block, data_dest, cell, bio);
drivers/md/dm-thin.c
1580
static void retry_on_resume(struct bio *bio)
drivers/md/dm-thin.c
1582
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
1586
bio_list_add(&tc->retry_on_resume_list, bio);
drivers/md/dm-thin.c
1614
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
drivers/md/dm-thin.c
1619
bio->bi_status = error;
drivers/md/dm-thin.c
1620
bio_endio(bio);
drivers/md/dm-thin.c
1622
retry_on_resume(bio);
drivers/md/dm-thin.c
1627
struct bio *bio;
drivers/md/dm-thin.c
1640
while ((bio = bio_list_pop(&bios)))
drivers/md/dm-thin.c
1641
retry_on_resume(bio);
drivers/md/dm-thin.c
1658
m->bio = virt_cell->holder;
drivers/md/dm-thin.c
1665
struct bio *bio)
drivers/md/dm-thin.c
1721
m->bio = bio;
drivers/md/dm-thin.c
1731
bio_inc_remaining(bio);
drivers/md/dm-thin.c
1745
struct bio *bio = virt_cell->holder;
drivers/md/dm-thin.c
1746
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
1754
break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
drivers/md/dm-thin.c
1761
bio_endio(bio);
drivers/md/dm-thin.c
1764
static void process_discard_bio(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
1770
get_bio_block_range(tc, bio, &begin, &end);
drivers/md/dm-thin.c
1775
bio_endio(bio);
drivers/md/dm-thin.c
1781
bio_endio(bio);
drivers/md/dm-thin.c
1785
if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) {
drivers/md/dm-thin.c
1799
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
drivers/md/dm-thin.c
1812
data_block, cell, bio);
drivers/md/dm-thin.c
1831
struct bio *bio;
drivers/md/dm-thin.c
1833
while ((bio = bio_list_pop(&cell->bios))) {
drivers/md/dm-thin.c
1834
if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
drivers/md/dm-thin.c
1835
bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-thin.c
1836
bio_list_add(&info->defer_bios, bio);
drivers/md/dm-thin.c
1838
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
1841
inc_all_io_entry(info->tc->pool, bio);
drivers/md/dm-thin.c
1842
bio_list_add(&info->issue_bios, bio);
drivers/md/dm-thin.c
1851
struct bio *bio;
drivers/md/dm-thin.c
1861
while ((bio = bio_list_pop(&info.defer_bios)))
drivers/md/dm-thin.c
1862
thin_defer_bio(tc, bio);
drivers/md/dm-thin.c
1864
while ((bio = bio_list_pop(&info.issue_bios)))
drivers/md/dm-thin.c
1865
remap_and_issue(tc, bio, block);
drivers/md/dm-thin.c
1868
static void process_shared_bio(struct thin_c *tc, struct bio *bio,
drivers/md/dm-thin.c
1882
if (bio_detain(pool, &key, bio, &data_cell)) {
drivers/md/dm-thin.c
1887
if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
drivers/md/dm-thin.c
1888
break_sharing(tc, bio, block, &key, lookup_result, data_cell);
drivers/md/dm-thin.c
1891
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
1894
inc_all_io_entry(pool, bio);
drivers/md/dm-thin.c
1895
remap_and_issue(tc, bio, lookup_result->block);
drivers/md/dm-thin.c
1902
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
drivers/md/dm-thin.c
1912
if (!bio->bi_iter.bi_size) {
drivers/md/dm-thin.c
1913
inc_all_io_entry(pool, bio);
drivers/md/dm-thin.c
1916
remap_and_issue(tc, bio, 0);
drivers/md/dm-thin.c
1923
if (bio_data_dir(bio) == READ) {
drivers/md/dm-thin.c
1924
zero_fill_bio(bio);
drivers/md/dm-thin.c
1926
bio_endio(bio);
drivers/md/dm-thin.c
1934
schedule_external_copy(tc, block, data_block, cell, bio);
drivers/md/dm-thin.c
1936
schedule_zero(tc, block, data_block, cell, bio);
drivers/md/dm-thin.c
1955
struct bio *bio = cell->holder;
drivers/md/dm-thin.c
1956
dm_block_t block = get_bio_block(tc, bio);
drivers/md/dm-thin.c
1968
process_shared_bio(tc, bio, block, &lookup_result, cell);
drivers/md/dm-thin.c
1970
inc_all_io_entry(pool, bio);
drivers/md/dm-thin.c
1971
remap_and_issue(tc, bio, lookup_result.block);
drivers/md/dm-thin.c
1977
if (bio_data_dir(bio) == READ && tc->origin_dev) {
drivers/md/dm-thin.c
1978
inc_all_io_entry(pool, bio);
drivers/md/dm-thin.c
1981
if (bio_end_sector(bio) <= tc->origin_size)
drivers/md/dm-thin.c
1982
remap_to_origin_and_issue(tc, bio);
drivers/md/dm-thin.c
1984
else if (bio->bi_iter.bi_sector < tc->origin_size) {
drivers/md/dm-thin.c
1985
zero_fill_bio(bio);
drivers/md/dm-thin.c
1986
bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
drivers/md/dm-thin.c
1987
remap_to_origin_and_issue(tc, bio);
drivers/md/dm-thin.c
1990
zero_fill_bio(bio);
drivers/md/dm-thin.c
1991
bio_endio(bio);
drivers/md/dm-thin.c
1994
provision_block(tc, bio, block, cell);
drivers/md/dm-thin.c
2001
bio_io_error(bio);
drivers/md/dm-thin.c
2006
static void process_bio(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2009
dm_block_t block = get_bio_block(tc, bio);
drivers/md/dm-thin.c
2018
if (bio_detain(pool, &key, bio, &cell))
drivers/md/dm-thin.c
2024
static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
drivers/md/dm-thin.c
2028
int rw = bio_data_dir(bio);
drivers/md/dm-thin.c
2029
dm_block_t block = get_bio_block(tc, bio);
drivers/md/dm-thin.c
2035
if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
drivers/md/dm-thin.c
2036
handle_unserviceable_bio(tc->pool, bio);
drivers/md/dm-thin.c
2040
inc_all_io_entry(tc->pool, bio);
drivers/md/dm-thin.c
2041
remap_and_issue(tc, bio, lookup_result.block);
drivers/md/dm-thin.c
2051
handle_unserviceable_bio(tc->pool, bio);
drivers/md/dm-thin.c
2056
inc_all_io_entry(tc->pool, bio);
drivers/md/dm-thin.c
2057
remap_to_origin_and_issue(tc, bio);
drivers/md/dm-thin.c
2061
zero_fill_bio(bio);
drivers/md/dm-thin.c
2062
bio_endio(bio);
drivers/md/dm-thin.c
2070
bio_io_error(bio);
drivers/md/dm-thin.c
2075
static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2077
__process_bio_read_only(tc, bio, NULL);
drivers/md/dm-thin.c
2085
static void process_bio_success(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2087
bio_endio(bio);
drivers/md/dm-thin.c
2090
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2092
bio_io_error(bio);
drivers/md/dm-thin.c
2118
static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2122
sector_t bi_sector = bio->bi_iter.bi_sector;
drivers/md/dm-thin.c
2136
pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
2145
struct bio *bio;
drivers/md/dm-thin.c
2149
bio = thin_bio(pbd);
drivers/md/dm-thin.c
2151
bio_list_add(&tc->deferred_bio_list, bio);
drivers/md/dm-thin.c
2160
struct bio *bio;
drivers/md/dm-thin.c
2168
while ((bio = bio_list_pop(&bios)))
drivers/md/dm-thin.c
2169
__thin_bio_rb_add(tc, bio);
drivers/md/dm-thin.c
2182
struct bio *bio;
drivers/md/dm-thin.c
2210
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-thin.c
2218
bio_list_add(&tc->deferred_bio_list, bio);
drivers/md/dm-thin.c
2224
if (bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-thin.c
2225
pool->process_discard(tc, bio);
drivers/md/dm-thin.c
2227
pool->process_bio(tc, bio);
drivers/md/dm-thin.c
226
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
drivers/md/dm-thin.c
2360
struct bio *bio;
drivers/md/dm-thin.c
2393
while ((bio = bio_list_pop(&bios)))
drivers/md/dm-thin.c
2394
bio_io_error(bio);
drivers/md/dm-thin.c
2399
while ((bio = bio_list_pop(&bio_completions)))
drivers/md/dm-thin.c
2400
bio_endio(bio);
drivers/md/dm-thin.c
2402
while ((bio = bio_list_pop(&bios))) {
drivers/md/dm-thin.c
2407
if (bio->bi_opf & REQ_PREFLUSH)
drivers/md/dm-thin.c
2408
bio_endio(bio);
drivers/md/dm-thin.c
2410
dm_submit_bio_remap(bio, NULL);
drivers/md/dm-thin.c
2672
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2677
bio_list_add(&tc->deferred_bio_list, bio);
drivers/md/dm-thin.c
2683
static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2688
thin_defer_bio(tc, bio);
drivers/md/dm-thin.c
2705
static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
2707
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
2719
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-thin.c
2723
dm_block_t block = get_bio_block(tc, bio);
drivers/md/dm-thin.c
2729
thin_hook_bio(tc, bio);
drivers/md/dm-thin.c
2732
bio->bi_status = BLK_STS_DM_REQUEUE;
drivers/md/dm-thin.c
2733
bio_endio(bio);
drivers/md/dm-thin.c
2738
bio_io_error(bio);
drivers/md/dm-thin.c
2742
if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-thin.c
2743
thin_defer_bio_with_throttle(tc, bio);
drivers/md/dm-thin.c
2752
if (bio_detain(tc->pool, &key, bio, &virt_cell))
drivers/md/dm-thin.c
2782
if (bio_detain(tc->pool, &key, bio, &data_cell)) {
drivers/md/dm-thin.c
2787
inc_all_io_entry(tc->pool, bio);
drivers/md/dm-thin.c
2791
remap(tc, bio, result.block);
drivers/md/dm-thin.c
2805
bio_io_error(bio);
drivers/md/dm-thin.c
3438
static int pool_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-thin.c
3447
bio_set_dev(bio, pt->data_dev->bdev);
drivers/md/dm-thin.c
384
struct bio *parent_bio;
drivers/md/dm-thin.c
385
struct bio *bio;
drivers/md/dm-thin.c
388
static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
drivers/md/dm-thin.c
395
op->bio = NULL;
drivers/md/dm-thin.c
404
__blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
drivers/md/dm-thin.c
409
if (op->bio) {
drivers/md/dm-thin.c
414
bio_chain(op->bio, op->parent_bio);
drivers/md/dm-thin.c
415
op->bio->bi_opf = REQ_OP_DISCARD;
drivers/md/dm-thin.c
416
submit_bio(op->bio);
drivers/md/dm-thin.c
4326
static int thin_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-thin.c
4328
bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-thin.c
4330
return thin_bio_map(ti, bio);
drivers/md/dm-thin.c
4333
static int thin_endio(struct dm_target *ti, struct bio *bio,
drivers/md/dm-thin.c
4337
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
443
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
drivers/md/dm-thin.c
455
r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
drivers/md/dm-thin.c
597
struct bio *bio;
drivers/md/dm-thin.c
599
while ((bio = bio_list_pop(bios))) {
drivers/md/dm-thin.c
600
bio->bi_status = error;
drivers/md/dm-thin.c
601
bio_endio(bio);
drivers/md/dm-thin.c
672
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
675
sector_t block_nr = bio->bi_iter.bi_sector;
drivers/md/dm-thin.c
688
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
drivers/md/dm-thin.c
692
sector_t b = bio->bi_iter.bi_sector;
drivers/md/dm-thin.c
693
sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
drivers/md/dm-thin.c
714
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
drivers/md/dm-thin.c
717
sector_t bi_sector = bio->bi_iter.bi_sector;
drivers/md/dm-thin.c
719
bio_set_dev(bio, tc->pool_dev->bdev);
drivers/md/dm-thin.c
721
bio->bi_iter.bi_sector =
drivers/md/dm-thin.c
725
bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
drivers/md/dm-thin.c
730
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
732
bio_set_dev(bio, tc->origin_dev->bdev);
drivers/md/dm-thin.c
735
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
737
return op_is_flush(bio->bi_opf) &&
drivers/md/dm-thin.c
741
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
drivers/md/dm-thin.c
745
if (bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-thin.c
748
h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
752
static void issue(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
756
if (!bio_triggers_commit(tc, bio)) {
drivers/md/dm-thin.c
757
dm_submit_bio_remap(bio, NULL);
drivers/md/dm-thin.c
767
bio_io_error(bio);
drivers/md/dm-thin.c
776
bio_list_add(&pool->deferred_flush_bios, bio);
drivers/md/dm-thin.c
780
static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
782
remap_to_origin(tc, bio);
drivers/md/dm-thin.c
783
issue(tc, bio);
drivers/md/dm-thin.c
786
static void remap_and_issue(struct thin_c *tc, struct bio *bio,
drivers/md/dm-thin.c
789
remap(tc, bio, block);
drivers/md/dm-thin.c
790
issue(tc, bio);
drivers/md/dm-thin.c
823
struct bio *bio;
drivers/md/dm-thin.c
855
static void overwrite_endio(struct bio *bio)
drivers/md/dm-thin.c
857
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
drivers/md/dm-thin.c
860
bio->bi_end_io = m->saved_bi_end_io;
drivers/md/dm-thin.c
862
m->status = bio->bi_status;
drivers/md/dm-thin.c
897
static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
drivers/md/dm-thin.c
909
struct bio *bio;
drivers/md/dm-thin.c
911
while ((bio = bio_list_pop(&cell->bios))) {
drivers/md/dm-thin.c
912
if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
drivers/md/dm-thin.c
913
bio_list_add(&info->defer_bios, bio);
drivers/md/dm-thin.c
915
inc_all_io_entry(info->tc->pool, bio);
drivers/md/dm-thin.c
922
bio_list_add(&info->issue_bios, bio);
drivers/md/dm-thin.c
931
struct bio *bio;
drivers/md/dm-thin.c
946
while ((bio = bio_list_pop(&info.defer_bios)))
drivers/md/dm-thin.c
947
thin_defer_bio(tc, bio);
drivers/md/dm-thin.c
949
while ((bio = bio_list_pop(&info.issue_bios)))
drivers/md/dm-thin.c
950
remap_and_issue(info.tc, bio, block);
drivers/md/dm-thin.c
960
static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
drivers/md/dm-thin.c
968
if (!bio_triggers_commit(tc, bio)) {
drivers/md/dm-thin.c
969
bio_endio(bio);
drivers/md/dm-thin.c
979
bio_io_error(bio);
drivers/md/dm-thin.c
988
bio_list_add(&pool->deferred_flush_completions, bio);
drivers/md/dm-thin.c
996
struct bio *bio = m->bio;
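
issue() and complete_overwrite_bio() above enforce one policy: a bio whose completion implies durability (flush or FUA) must not be issued, or acknowledged, ahead of a metadata commit, so such bios are parked on a deferred list the worker drains after committing. A reduced sketch, with the dirty-metadata test and the list supplied by the caller (dm-thin keeps both inside struct pool):

#include <linux/bio.h>
#include <linux/device-mapper.h>

/* Send @bio down now unless it carries flush/FUA semantics while
 * uncommitted metadata exists; then it must wait for the commit. */
static void issue_or_defer(struct bio *bio, bool metadata_dirty,
			   struct bio_list *deferred_flush_bios)
{
	if (!(op_is_flush(bio->bi_opf) && metadata_dirty)) {
		dm_submit_bio_remap(bio, NULL);
		return;
	}
	bio_list_add(deferred_flush_bios, bio);
}
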
drivers/md/dm-unstripe.c
117
static sector_t map_to_core(struct dm_target *ti, struct bio *bio)
drivers/md/dm-unstripe.c
120
sector_t sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-unstripe.c
135
static int unstripe_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-unstripe.c
139
bio_set_dev(bio, uc->dev->bdev);
drivers/md/dm-unstripe.c
140
bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;
drivers/md/dm-vdo/block-map.c
1048
static void write_cache_page_endio(struct bio *bio)
drivers/md/dm-vdo/block-map.c
1050
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/block-map.c
1630
static void write_page_endio(struct bio *bio);
drivers/md/dm-vdo/block-map.c
1655
static void write_page_endio(struct bio *bio)
drivers/md/dm-vdo/block-map.c
1657
struct pooled_vio *vio = bio->bi_private;
drivers/md/dm-vdo/block-map.c
1880
static void load_page_endio(struct bio *bio)
drivers/md/dm-vdo/block-map.c
1882
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/block-map.c
2561
pbn_from_vio_bio(cursor->vio->vio.bio), page);
drivers/md/dm-vdo/block-map.c
2565
static void traversal_endio(struct bio *bio)
drivers/md/dm-vdo/block-map.c
2567
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/block-map.c
766
static void load_cache_page_endio(struct bio *bio)
drivers/md/dm-vdo/block-map.c
768
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/block-map.c
819
static void flush_endio(struct bio *bio)
drivers/md/dm-vdo/block-map.c
821
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/data-vio.c
1458
struct bio *bio = data_vio->user_bio;
drivers/md/dm-vdo/data-vio.c
1462
if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-vdo/data-vio.c
1467
copy_from_bio(bio, data + data_vio->offset);
drivers/md/dm-vdo/data-vio.c
1505
static void read_endio(struct bio *bio)
drivers/md/dm-vdo/data-vio.c
1507
struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
drivers/md/dm-vdo/data-vio.c
1508
int result = blk_status_to_errno(bio->bi_status);
drivers/md/dm-vdo/data-vio.c
1510
vdo_count_completed_bios(bio);
drivers/md/dm-vdo/data-vio.c
1569
bio_reset(vio->bio, vio->bio->bi_bdev, opf);
drivers/md/dm-vdo/data-vio.c
1570
bio_init_clone(data_vio->user_bio->bi_bdev, vio->bio,
drivers/md/dm-vdo/data-vio.c
1574
vdo_set_bio_properties(vio->bio, vio, read_endio, opf,
drivers/md/dm-vdo/data-vio.c
1862
static void write_bio_finished(struct bio *bio)
drivers/md/dm-vdo/data-vio.c
1864
struct data_vio *data_vio = vio_as_data_vio((struct vio *) bio->bi_private);
drivers/md/dm-vdo/data-vio.c
1866
vdo_count_completed_bios(bio);
drivers/md/dm-vdo/data-vio.c
1868
blk_status_to_errno(bio->bi_status));
drivers/md/dm-vdo/data-vio.c
222
static inline u64 get_arrival_time(struct bio *bio)
drivers/md/dm-vdo/data-vio.c
224
return (u64) bio->bi_private;
drivers/md/dm-vdo/data-vio.c
276
struct bio *bio = data_vio->user_bio;
drivers/md/dm-vdo/data-vio.c
279
if (bio == NULL)
drivers/md/dm-vdo/data-vio.c
287
vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
drivers/md/dm-vdo/data-vio.c
289
vdo_count_bios(&vdo->stats.bios_acknowledged_partial, bio);
drivers/md/dm-vdo/data-vio.c
291
bio->bi_status = errno_to_blk_status(error);
drivers/md/dm-vdo/data-vio.c
292
bio_endio(bio);
drivers/md/dm-vdo/data-vio.c
295
static void copy_to_bio(struct bio *bio, char *data_ptr)
drivers/md/dm-vdo/data-vio.c
300
bio_for_each_segment(biovec, bio, iter) {
drivers/md/dm-vdo/data-vio.c
517
static void copy_from_bio(struct bio *bio, char *data_ptr)
drivers/md/dm-vdo/data-vio.c
522
bio_for_each_segment(biovec, bio, iter) {
drivers/md/dm-vdo/data-vio.c
528
static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio)
drivers/md/dm-vdo/data-vio.c
538
data_vio->user_bio = bio;
drivers/md/dm-vdo/data-vio.c
539
data_vio->offset = to_bytes(bio->bi_iter.bi_sector & VDO_SECTORS_PER_BLOCK_MASK);
drivers/md/dm-vdo/data-vio.c
540
data_vio->is_partial = (bio->bi_iter.bi_size < VDO_BLOCK_SIZE) || (data_vio->offset != 0);
drivers/md/dm-vdo/data-vio.c
547
if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-vdo/data-vio.c
548
data_vio->remaining_discard = bio->bi_iter.bi_size;
drivers/md/dm-vdo/data-vio.c
552
vdo_count_bios(&vdo->stats.bios_in_partial, bio);
drivers/md/dm-vdo/data-vio.c
556
vdo_count_bios(&vdo->stats.bios_in_partial, bio);
drivers/md/dm-vdo/data-vio.c
558
if (bio_data_dir(bio) == WRITE)
drivers/md/dm-vdo/data-vio.c
560
} else if (bio_data_dir(bio) == READ) {
drivers/md/dm-vdo/data-vio.c
567
copy_from_bio(bio, data_vio->vio.data);
drivers/md/dm-vdo/data-vio.c
575
lbn = (bio->bi_iter.bi_sector - vdo->starting_sector_offset) / VDO_SECTORS_PER_BLOCK;
drivers/md/dm-vdo/data-vio.c
581
struct bio *bio = bio_list_pop(limiter->permitted_waiters);
drivers/md/dm-vdo/data-vio.c
583
launch_bio(limiter->pool->completion.vdo, data_vio, bio);
drivers/md/dm-vdo/data-vio.c
586
bio = bio_list_peek(limiter->permitted_waiters);
drivers/md/dm-vdo/data-vio.c
587
limiter->arrival = ((bio == NULL) ? U64_MAX : get_arrival_time(bio));
drivers/md/dm-vdo/data-vio.c
592
struct bio *bio = bio_list_pop(&limiter->waiters);
drivers/md/dm-vdo/data-vio.c
595
limiter->arrival = get_arrival_time(bio);
drivers/md/dm-vdo/data-vio.c
597
bio_list_add(limiter->permitted_waiters, bio);
drivers/md/dm-vdo/data-vio.c
708
struct bio *bio = bio_list_peek(&pool->limiter.waiters);
drivers/md/dm-vdo/data-vio.c
710
if (bio != NULL)
drivers/md/dm-vdo/data-vio.c
711
pool->limiter.arrival = get_arrival_time(bio);
drivers/md/dm-vdo/data-vio.c
785
struct bio *bio;
drivers/md/dm-vdo/data-vio.c
808
result = vdo_create_bio(&bio);
drivers/md/dm-vdo/data-vio.c
815
initialize_vio(&data_vio->vio, bio, 1, VIO_TYPE_DATA, VIO_PRIORITY_DATA, vdo);
drivers/md/dm-vdo/data-vio.c
825
vdo_free_bio(vdo_forget(data_vio->vio.bio));
drivers/md/dm-vdo/data-vio.c
940
static void wait_permit(struct limiter *limiter, struct bio *bio)
drivers/md/dm-vdo/data-vio.c
945
bio_list_add(&limiter->new_waiters, bio);
drivers/md/dm-vdo/data-vio.c
960
void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio)
drivers/md/dm-vdo/data-vio.c
967
bio->bi_private = (void *) jiffies;
drivers/md/dm-vdo/data-vio.c
969
if ((bio_op(bio) == REQ_OP_DISCARD) &&
drivers/md/dm-vdo/data-vio.c
971
wait_permit(&pool->discard_limiter, bio);
drivers/md/dm-vdo/data-vio.c
976
wait_permit(&pool->limiter, bio);
drivers/md/dm-vdo/data-vio.c
982
launch_bio(pool->completion.vdo, data_vio, bio);
drivers/md/dm-vdo/data-vio.h
251
struct bio *user_bio;
drivers/md/dm-vdo/data-vio.h
334
void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
drivers/md/dm-vdo/dedupe.c
1160
static void verify_endio(struct bio *bio)
drivers/md/dm-vdo/dedupe.c
1162
struct data_vio *agent = vio_as_data_vio(bio->bi_private);
drivers/md/dm-vdo/dedupe.c
1163
int result = blk_status_to_errno(bio->bi_status);
drivers/md/dm-vdo/dedupe.c
1165
vdo_count_completed_bios(bio);
drivers/md/dm-vdo/dm-vdo-target.c
896
static int vdo_map_bio(struct dm_target *ti, struct bio *bio)
drivers/md/dm-vdo/dm-vdo-target.c
906
vdo_count_bios(&vdo->stats.bios_in, bio);
drivers/md/dm-vdo/dm-vdo-target.c
910
if ((bio_op(bio) == REQ_OP_FLUSH) || ((bio->bi_opf & REQ_PREFLUSH) != 0)) {
drivers/md/dm-vdo/dm-vdo-target.c
911
vdo_launch_flush(vdo, bio);
drivers/md/dm-vdo/dm-vdo-target.c
919
vdo_launch_bio(vdo->data_vio_pool, bio);
drivers/md/dm-vdo/flush.c
392
void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
drivers/md/dm-vdo/flush.c
408
bio_list_add(&flusher->waiting_flush_bios, bio);
drivers/md/dm-vdo/flush.c
464
struct bio *bio;
drivers/md/dm-vdo/flush.c
466
while ((bio = bio_list_pop(&flush->bios)) != NULL) {
drivers/md/dm-vdo/flush.c
471
vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
drivers/md/dm-vdo/flush.c
474
bio_set_dev(bio, vdo_get_backing_device(vdo));
drivers/md/dm-vdo/flush.c
476
submit_bio_noacct(bio);
drivers/md/dm-vdo/flush.h
38
void vdo_launch_flush(struct vdo *vdo, struct bio *bio);
drivers/md/dm-vdo/io-submitter.c
107
static void send_bio_to_device(struct vio *vio, struct bio *bio)
drivers/md/dm-vdo/io-submitter.c
113
count_all_bios(vio, bio);
drivers/md/dm-vdo/io-submitter.c
114
bio_set_dev(bio, vdo_get_backing_device(vdo));
drivers/md/dm-vdo/io-submitter.c
115
submit_bio_noacct(bio);
drivers/md/dm-vdo/io-submitter.c
127
send_bio_to_device(vio, vio->bio);
drivers/md/dm-vdo/io-submitter.c
139
static struct bio *get_bio_list(struct vio *vio)
drivers/md/dm-vdo/io-submitter.c
141
struct bio *bio;
drivers/md/dm-vdo/io-submitter.c
152
bio = vio->bios_merged.head;
drivers/md/dm-vdo/io-submitter.c
156
return bio;
drivers/md/dm-vdo/io-submitter.c
168
struct bio *bio, *next;
drivers/md/dm-vdo/io-submitter.c
172
for (bio = get_bio_list(vio); bio != NULL; bio = next) {
drivers/md/dm-vdo/io-submitter.c
173
next = bio->bi_next;
drivers/md/dm-vdo/io-submitter.c
174
bio->bi_next = NULL;
drivers/md/dm-vdo/io-submitter.c
175
send_bio_to_device((struct vio *) bio->bi_private, bio);
drivers/md/dm-vdo/io-submitter.c
194
struct bio *bio = vio->bio;
drivers/md/dm-vdo/io-submitter.c
195
sector_t merge_sector = bio->bi_iter.bi_sector;
drivers/md/dm-vdo/io-submitter.c
211
if (bio_data_dir(bio) != bio_data_dir(vio_merge->bio))
drivers/md/dm-vdo/io-submitter.c
273
struct bio *bio = vio->bio;
drivers/md/dm-vdo/io-submitter.c
279
bio->bi_next = NULL;
drivers/md/dm-vdo/io-submitter.c
281
bio_list_add(&vio->bios_merged, bio);
drivers/md/dm-vdo/io-submitter.c
293
bio->bi_iter.bi_sector,
drivers/md/dm-vdo/io-submitter.c
74
static void count_all_bios(struct vio *vio, struct bio *bio)
drivers/md/dm-vdo/io-submitter.c
79
vdo_count_bios(&stats->bios_out, bio);
drivers/md/dm-vdo/io-submitter.c
83
vdo_count_bios(&stats->bios_meta, bio);
drivers/md/dm-vdo/io-submitter.c
85
vdo_count_bios(&stats->bios_journal, bio);
drivers/md/dm-vdo/io-submitter.c
87
vdo_count_bios(&stats->bios_page_cache, bio);
drivers/md/dm-vdo/packer.c
410
static void compressed_write_end_io(struct bio *bio)
drivers/md/dm-vdo/packer.c
412
struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
drivers/md/dm-vdo/packer.c
414
vdo_count_completed_bios(bio);
drivers/md/dm-vdo/packer.c
416
continue_data_vio_with_error(data_vio, blk_status_to_errno(bio->bi_status));
drivers/md/dm-vdo/recovery-journal.c
1321
static void complete_write_endio(struct bio *bio)
drivers/md/dm-vdo/recovery-journal.c
1323
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/recovery-journal.c
506
static void flush_endio(struct bio *bio)
drivers/md/dm-vdo/recovery-journal.c
508
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/repair.c
1681
static void read_journal_endio(struct bio *bio)
drivers/md/dm-vdo/repair.c
1683
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
1153
static void write_reference_block_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
1155
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
2329
static void load_reference_block_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
2331
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
2523
static void read_slab_journal_tail_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
2525
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
2984
static void read_slab_journal_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
2986
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
2989
continue_vio_after_io(bio->bi_private, apply_journal_entries,
drivers/md/dm-vdo/slab-depot.c
303
static void write_slab_summary_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
305
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
435
static void flush_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
437
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
4552
static void write_summary_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
4554
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
4619
static void load_summary_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
4621
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/slab-depot.c
729
static void write_slab_journal_endio(struct bio *bio)
drivers/md/dm-vdo/slab-depot.c
731
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/types.h
386
struct bio *bio;
drivers/md/dm-vdo/vdo.c
296
bio_set_dev(vio->bio, vdo_get_backing_device(vdo));
drivers/md/dm-vdo/vdo.c
297
submit_bio_wait(vio->bio);
drivers/md/dm-vdo/vdo.c
298
result = blk_status_to_errno(vio->bio->bi_status);
drivers/md/dm-vdo/vdo.c
765
static void read_super_block_endio(struct bio *bio)
drivers/md/dm-vdo/vdo.c
767
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/vdo.c
828
struct bio bio;
drivers/md/dm-vdo/vdo.c
830
bio_init(&bio, vdo_get_backing_device(vdo), NULL, 0,
drivers/md/dm-vdo/vdo.c
832
submit_bio_wait(&bio);
drivers/md/dm-vdo/vdo.c
833
result = blk_status_to_errno(bio.bi_status);
drivers/md/dm-vdo/vdo.c
841
bio_uninit(&bio);
drivers/md/dm-vdo/vdo.c
938
static void super_block_write_endio(struct bio *bio)
drivers/md/dm-vdo/vdo.c
940
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/vio.c
100
initialize_vio(vio, bio, block_count, vio_type, priority, vdo);
drivers/md/dm-vdo/vio.c
159
vdo_free_bio(vdo_forget(vio->bio));
drivers/md/dm-vdo/vio.c
173
void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
drivers/md/dm-vdo/vio.c
183
bio->bi_private = vio;
drivers/md/dm-vdo/vio.c
184
bio->bi_end_io = callback;
drivers/md/dm-vdo/vio.c
185
bio->bi_opf = bi_opf;
drivers/md/dm-vdo/vio.c
186
bio->bi_iter.bi_sector = pbn * VDO_SECTORS_PER_BLOCK;
drivers/md/dm-vdo/vio.c
205
struct bio *bio = vio->bio;
drivers/md/dm-vdo/vio.c
209
bio_reset(bio, bio->bi_bdev, bi_opf);
drivers/md/dm-vdo/vio.c
210
vdo_set_bio_properties(bio, vio, callback, bi_opf, pbn);
drivers/md/dm-vdo/vio.c
214
bio->bi_ioprio = 0;
drivers/md/dm-vdo/vio.c
215
bio->bi_io_vec = bio_inline_vecs(bio);
drivers/md/dm-vdo/vio.c
216
bio->bi_max_vecs = vio->block_count + 1;
drivers/md/dm-vdo/vio.c
234
bytes_added = bio_add_page(bio, page, bytes, offset);
drivers/md/dm-vdo/vio.c
289
physical_block_number_t pbn = pbn_from_vio_bio(vio->bio);
drivers/md/dm-vdo/vio.c
291
if (bio_op(vio->bio) == REQ_OP_READ) {
drivers/md/dm-vdo/vio.c
293
} else if ((vio->bio->bi_opf & REQ_PREFLUSH) == REQ_PREFLUSH) {
drivers/md/dm-vdo/vio.c
294
description = (((vio->bio->bi_opf & REQ_FUA) == REQ_FUA) ?
drivers/md/dm-vdo/vio.c
297
} else if ((vio->bio->bi_opf & REQ_FUA) == REQ_FUA) {
drivers/md/dm-vdo/vio.c
41
physical_block_number_t pbn_from_vio_bio(struct bio *bio)
drivers/md/dm-vdo/vio.c
43
struct vio *vio = bio->bi_private;
drivers/md/dm-vdo/vio.c
45
physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK;
drivers/md/dm-vdo/vio.c
459
void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio)
drivers/md/dm-vdo/vio.c
461
if (((bio->bi_opf & REQ_PREFLUSH) != 0) && (bio->bi_iter.bi_size == 0)) {
drivers/md/dm-vdo/vio.c
467
switch (bio_op(bio)) {
drivers/md/dm-vdo/vio.c
483
bio_op(bio));
drivers/md/dm-vdo/vio.c
486
if ((bio->bi_opf & REQ_PREFLUSH) != 0)
drivers/md/dm-vdo/vio.c
488
if (bio->bi_opf & REQ_FUA)
drivers/md/dm-vdo/vio.c
492
static void count_all_bios_completed(struct vio *vio, struct bio *bio)
drivers/md/dm-vdo/vio.c
497
vdo_count_bios(&stats->bios_out_completed, bio);
drivers/md/dm-vdo/vio.c
50
static int create_multi_block_bio(block_count_t size, struct bio **bio_ptr)
drivers/md/dm-vdo/vio.c
501
vdo_count_bios(&stats->bios_meta_completed, bio);
drivers/md/dm-vdo/vio.c
503
vdo_count_bios(&stats->bios_journal_completed, bio);
drivers/md/dm-vdo/vio.c
505
vdo_count_bios(&stats->bios_page_cache_completed, bio);
drivers/md/dm-vdo/vio.c
508
void vdo_count_completed_bios(struct bio *bio)
drivers/md/dm-vdo/vio.c
510
struct vio *vio = (struct vio *) bio->bi_private;
drivers/md/dm-vdo/vio.c
513
count_all_bios_completed(vio, bio);
drivers/md/dm-vdo/vio.c
52
struct bio *bio = NULL;
drivers/md/dm-vdo/vio.c
55
result = vdo_allocate_extended(struct bio, size + 1, struct bio_vec,
drivers/md/dm-vdo/vio.c
56
"bio", &bio);
drivers/md/dm-vdo/vio.c
60
*bio_ptr = bio;
drivers/md/dm-vdo/vio.c
64
int vdo_create_bio(struct bio **bio_ptr)
drivers/md/dm-vdo/vio.c
69
void vdo_free_bio(struct bio *bio)
drivers/md/dm-vdo/vio.c
71
if (bio == NULL)
drivers/md/dm-vdo/vio.c
74
bio_uninit(bio);
drivers/md/dm-vdo/vio.c
75
vdo_free(vdo_forget(bio));
drivers/md/dm-vdo/vio.c
82
struct bio *bio;
drivers/md/dm-vdo/vio.c
96
result = create_multi_block_bio(block_count, &bio);
drivers/md/dm-vdo/vio.h
109
static inline void initialize_vio(struct vio *vio, struct bio *bio,
drivers/md/dm-vdo/vio.h
116
vio->bio = bio;
drivers/md/dm-vdo/vio.h
123
void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
drivers/md/dm-vdo/vio.h
169
void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
drivers/md/dm-vdo/vio.h
170
void vdo_count_completed_bios(struct bio *bio);
drivers/md/dm-vdo/vio.h
181
vdo_count_completed_bios(vio->bio);
drivers/md/dm-vdo/vio.h
183
continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
drivers/md/dm-vdo/vio.h
61
physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);
drivers/md/dm-vdo/vio.h
74
(unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
drivers/md/dm-vdo/vio.h
78
int vdo_create_bio(struct bio **bio_ptr);
drivers/md/dm-vdo/vio.h
79
void vdo_free_bio(struct bio *bio);
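
Nearly every dm-vdo endio above is the same three lines: recover the vio from bi_private, account the completed bio, convert bi_status to an errno and continue the vio's state machine. The common shape, composed only of calls that appear in the listing (assumes vdo's internal vio.h context):

#include <linux/bio.h>
#include "vio.h"	/* vdo-internal: struct vio, continue_vio() */

static void generic_vio_endio(struct bio *bio)
{
	struct vio *vio = bio->bi_private;

	vdo_count_completed_bios(bio);	/* per-type completion stats */
	continue_vio(vio, blk_status_to_errno(bio->bi_status));
}
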
drivers/md/dm-verity-fec.c
108
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
drivers/md/dm-verity-fec.c
111
par_buf_offset, &buf, bio->bi_ioprio);
drivers/md/dm-verity-fec.c
153
par_buf_offset, &buf, bio->bi_ioprio);
drivers/md/dm-verity-fec.c
205
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
drivers/md/dm-verity-fec.c
244
bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio->bi_ioprio);
drivers/md/dm-verity-target.c
250
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
drivers/md/dm-verity-target.c
266
&buf, bio->bi_ioprio);
drivers/md/dm-verity-target.c
317
struct bio *bio;
drivers/md/dm-verity-target.c
319
bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
drivers/md/dm-verity-target.c
320
dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
drivers/md/dm-verity-target.c
420
struct bio *bio,
drivers/md/dm-verity-target.c
442
if (bio->bi_status)
drivers/md/dm-verity-target.c
447
dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
drivers/md/dm-verity-target.c
466
struct bio *bio)
drivers/md/dm-verity-target.c
496
r = verity_handle_data_hash_mismatch(v, io, bio, block);
drivers/md/dm-verity-target.c
515
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
drivers/md/dm-verity-target.c
532
b++, bio_advance_iter_single(bio, iter, block_size)) {
drivers/md/dm-verity-target.c
539
if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
drivers/md/dm-verity-target.c
550
bv = bio_iter_iovec(bio, *iter);
drivers/md/dm-verity-target.c
577
r = verity_verify_pending_blocks(v, io, bio);
drivers/md/dm-verity-target.c
584
r = verity_verify_pending_blocks(v, io, bio);
drivers/md/dm-verity-target.c
616
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
drivers/md/dm-verity-target.c
618
bio->bi_end_io = io->orig_bi_end_io;
drivers/md/dm-verity-target.c
619
bio->bi_status = status;
drivers/md/dm-verity-target.c
624
unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
drivers/md/dm-verity-target.c
641
bio_endio(bio);
drivers/md/dm-verity-target.c
677
static void verity_end_io(struct bio *bio)
drivers/md/dm-verity-target.c
679
struct dm_verity_io *io = bio->bi_private;
drivers/md/dm-verity-target.c
680
unsigned short ioprio = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
drivers/md/dm-verity-target.c
683
if (bio->bi_status &&
drivers/md/dm-verity-target.c
686
(bio->bi_opf & REQ_RAHEAD))) {
drivers/md/dm-verity-target.c
687
verity_finish_io(io, bio->bi_status);
drivers/md/dm-verity-target.c
785
static int verity_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-verity-target.c
790
bio_set_dev(bio, v->data_dev->bdev);
drivers/md/dm-verity-target.c
791
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
drivers/md/dm-verity-target.c
793
if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
drivers/md/dm-verity-target.c
799
if (bio_end_sector(bio) >>
drivers/md/dm-verity-target.c
805
if (bio_data_dir(bio) == WRITE)
drivers/md/dm-verity-target.c
808
io = dm_per_bio_data(bio, ti->per_io_data_size);
drivers/md/dm-verity-target.c
810
io->orig_bi_end_io = bio->bi_end_io;
drivers/md/dm-verity-target.c
811
io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
drivers/md/dm-verity-target.c
812
io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
drivers/md/dm-verity-target.c
815
bio->bi_end_io = verity_end_io;
drivers/md/dm-verity-target.c
816
bio->bi_private = io;
drivers/md/dm-verity-target.c
817
io->iter = bio->bi_iter;
drivers/md/dm-verity-target.c
821
verity_submit_prefetch(v, io, bio->bi_ioprio);
drivers/md/dm-verity-target.c
823
submit_bio_noacct(bio);
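verity_map() above (lines 785-823) is a textbook interposed-completion target: it remaps the bio, saves the submitter's bi_end_io, splices in its own, and restores the original once verification is done. A stripped-down sketch with hypothetical names (my_io, my_end_io; device remap and error paths elided):

static void my_end_io(struct bio *bio)
{
	struct my_io *io = bio->bi_private;

	/* Restore the saved completion and hand the bio back up. */
	bio->bi_end_io = io->orig_bi_end_io;
	bio_endio(bio);
}

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_io *io = dm_per_bio_data(bio, ti->per_io_data_size);

	io->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = my_end_io;
	bio->bi_private = io;
	submit_bio_noacct(bio);		/* bio_set_dev() done earlier */
	return DM_MAPIO_SUBMITTED;
}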
drivers/md/dm-writecache.c
1246
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
drivers/md/dm-writecache.c
1250
int rw = bio_data_dir(bio);
drivers/md/dm-writecache.c
1254
struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
drivers/md/dm-writecache.c
1265
flush_dcache_page(bio_page(bio));
drivers/md/dm-writecache.c
1268
bio->bi_status = BLK_STS_IOERR;
drivers/md/dm-writecache.c
1271
flush_dcache_page(bio_page(bio));
drivers/md/dm-writecache.c
1279
bio_advance(bio, size);
drivers/md/dm-writecache.c
1288
struct bio *bio;
drivers/md/dm-writecache.c
1291
bio = bio_list_pop(&wc->flush_list);
drivers/md/dm-writecache.c
1292
if (!bio) {
drivers/md/dm-writecache.c
1305
if (bio_op(bio) == REQ_OP_DISCARD) {
drivers/md/dm-writecache.c
1306
writecache_discard(wc, bio->bi_iter.bi_sector,
drivers/md/dm-writecache.c
1307
bio_end_sector(bio));
drivers/md/dm-writecache.c
1309
bio_set_dev(bio, wc->dev->bdev);
drivers/md/dm-writecache.c
1310
submit_bio_noacct(bio);
drivers/md/dm-writecache.c
1315
bio->bi_status = BLK_STS_IOERR;
drivers/md/dm-writecache.c
1316
bio_endio(bio);
drivers/md/dm-writecache.c
1323
static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
drivers/md/dm-writecache.c
1327
bio_list_add(&wc->flush_list, bio);
drivers/md/dm-writecache.c
1338
static void writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
drivers/md/dm-writecache.c
1343
read_original_sector(wc, e) - bio->bi_iter.bi_sector;
drivers/md/dm-writecache.c
1344
if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
drivers/md/dm-writecache.c
1345
dm_accept_partial_bio(bio, next_boundary);
drivers/md/dm-writecache.c
1349
static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
drivers/md/dm-writecache.c
1356
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
drivers/md/dm-writecache.c
1357
if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
drivers/md/dm-writecache.c
1360
bio_copy_block(wc, bio, memory_data(wc, e));
drivers/md/dm-writecache.c
1361
if (bio->bi_iter.bi_size)
drivers/md/dm-writecache.c
1365
dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
drivers/md/dm-writecache.c
1366
bio_set_dev(bio, wc->ssd_dev->bdev);
drivers/md/dm-writecache.c
1367
bio->bi_iter.bi_sector = cache_sector(wc, e);
drivers/md/dm-writecache.c
1373
writecache_map_remap_origin(wc, bio, e);
drivers/md/dm-writecache.c
1374
wc->stats.reads += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
drivers/md/dm-writecache.c
1381
static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
drivers/md/dm-writecache.c
1388
while (bio_size < bio->bi_iter.bi_size) {
drivers/md/dm-writecache.c
1394
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
drivers/md/dm-writecache.c
1420
bio_set_dev(bio, wc->ssd_dev->bdev);
drivers/md/dm-writecache.c
1421
bio->bi_iter.bi_sector = start_cache_sec;
drivers/md/dm-writecache.c
1422
dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
drivers/md/dm-writecache.c
1424
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
drivers/md/dm-writecache.c
1425
wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
drivers/md/dm-writecache.c
1435
static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
drivers/md/dm-writecache.c
1444
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
drivers/md/dm-writecache.c
1447
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
drivers/md/dm-writecache.c
1463
(wc->metadata_only && !(bio->bi_opf & REQ_META)))
drivers/md/dm-writecache.c
1470
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
drivers/md/dm-writecache.c
1471
writecache_map_remap_origin(wc, bio, e);
drivers/md/dm-writecache.c
1472
wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits;
drivers/md/dm-writecache.c
1473
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
drivers/md/dm-writecache.c
1480
write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
drivers/md/dm-writecache.c
1486
bio_copy_block(wc, bio, memory_data(wc, e));
drivers/md/dm-writecache.c
1489
writecache_bio_copy_ssd(wc, bio, e, search_used);
drivers/md/dm-writecache.c
1492
} while (bio->bi_iter.bi_size);
drivers/md/dm-writecache.c
1494
if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
drivers/md/dm-writecache.c
1502
static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio *bio)
drivers/md/dm-writecache.c
1517
if (dm_bio_get_target_bio_nr(bio))
drivers/md/dm-writecache.c
1520
writecache_offload_bio(wc, bio);
drivers/md/dm-writecache.c
1524
static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio)
drivers/md/dm-writecache.c
1526
wc->stats.discards += bio->bi_iter.bi_size >> wc->block_size_bits;
drivers/md/dm-writecache.c
1532
writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
drivers/md/dm-writecache.c
1536
writecache_offload_bio(wc, bio);
drivers/md/dm-writecache.c
1540
static int writecache_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-writecache.c
1545
bio->bi_private = NULL;
drivers/md/dm-writecache.c
1549
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
drivers/md/dm-writecache.c
1550
map_op = writecache_map_flush(wc, bio);
drivers/md/dm-writecache.c
1554
bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
drivers/md/dm-writecache.c
1556
if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
drivers/md/dm-writecache.c
1559
(unsigned long long)bio->bi_iter.bi_sector,
drivers/md/dm-writecache.c
1560
bio->bi_iter.bi_size, wc->block_size);
drivers/md/dm-writecache.c
1565
if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
drivers/md/dm-writecache.c
1566
map_op = writecache_map_discard(wc, bio);
drivers/md/dm-writecache.c
1570
if (bio_data_dir(bio) == READ)
drivers/md/dm-writecache.c
1571
map_op = writecache_map_read(wc, bio);
drivers/md/dm-writecache.c
1573
map_op = writecache_map_write(wc, bio);
drivers/md/dm-writecache.c
1578
if (bio_op(bio) == REQ_OP_WRITE) {
drivers/md/dm-writecache.c
1580
bio->bi_private = (void *)2;
drivers/md/dm-writecache.c
1583
bio_set_dev(bio, wc->dev->bdev);
drivers/md/dm-writecache.c
1589
bio->bi_private = (void *)1;
drivers/md/dm-writecache.c
1590
atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
drivers/md/dm-writecache.c
1596
bio_endio(bio);
drivers/md/dm-writecache.c
1605
bio_io_error(bio);
drivers/md/dm-writecache.c
1615
static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
drivers/md/dm-writecache.c
1619
if (bio->bi_private == (void *)1) {
drivers/md/dm-writecache.c
1620
int dir = bio_data_dir(bio);
drivers/md/dm-writecache.c
1625
} else if (bio->bi_private == (void *)2) {
drivers/md/dm-writecache.c
1654
static void writecache_writeback_endio(struct bio *bio)
drivers/md/dm-writecache.c
1656
struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
drivers/md/dm-writecache.c
1692
if (unlikely(wb->bio.bi_status != BLK_STS_OK))
drivers/md/dm-writecache.c
1693
writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
drivers/md/dm-writecache.c
1694
"write error %d", wb->bio.bi_status);
drivers/md/dm-writecache.c
1716
bio_put(&wb->bio);
drivers/md/dm-writecache.c
1804
if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
drivers/md/dm-writecache.c
1807
return bio_add_page(&wb->bio, persistent_memory_page(address),
drivers/md/dm-writecache.c
1832
struct bio *bio;
drivers/md/dm-writecache.c
1843
bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
drivers/md/dm-writecache.c
1845
wb = container_of(bio, struct writeback_struct, bio);
drivers/md/dm-writecache.c
1847
bio->bi_end_io = writecache_writeback_endio;
drivers/md/dm-writecache.c
1848
bio->bi_iter.bi_sector = read_original_sector(wc, e);
drivers/md/dm-writecache.c
1877
bio->bi_opf |= REQ_FUA;
drivers/md/dm-writecache.c
1879
bio->bi_status = BLK_STS_IOERR;
drivers/md/dm-writecache.c
1880
bio_endio(bio);
drivers/md/dm-writecache.c
1881
} else if (unlikely(!bio_sectors(bio))) {
drivers/md/dm-writecache.c
1882
bio->bi_status = BLK_STS_OK;
drivers/md/dm-writecache.c
1883
bio_endio(bio);
drivers/md/dm-writecache.c
1885
submit_bio(bio);
drivers/md/dm-writecache.c
228
struct bio bio;
drivers/md/dm-writecache.c
2328
offsetof(struct writeback_struct, bio),
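The last two entries show how dm-writecache embeds a struct bio as the final member of writeback_struct and sizes the bio_set's front_pad with offsetof(), so container_of() can walk from any completed bio back to its owner. The shape of that idiom, with illustrative names:

struct wb_example {
	struct list_head list;		/* private bookkeeping */
	struct bio bio;			/* must be the last member */
};

/* At init time the bio_set reserves the leading bytes:
 *	bioset_init(&bs, 128, offsetof(struct wb_example, bio),
 *		    BIOSET_NEED_BVECS);
 * so every bio_alloc_bioset(..., &bs) comes embedded in a wb_example
 * and the completion handler can recover the container:
 */
static void wb_example_endio(struct bio *bio)
{
	struct wb_example *wb = container_of(bio, struct wb_example, bio);

	/* ... consume wb ... */
	bio_put(&wb->bio);
}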
drivers/md/dm-zero.c
38
static int zero_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-zero.c
40
switch (bio_op(bio)) {
drivers/md/dm-zero.c
42
if (bio->bi_opf & REQ_RAHEAD) {
drivers/md/dm-zero.c
46
zero_fill_bio(bio);
drivers/md/dm-zero.c
56
bio_endio(bio);
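Those four entries are nearly the whole target; dm-zero needs no I/O at all. For context, a sketch of the complete map function they come from (kept close to the in-tree code, comments added):

static int zero_map(struct dm_target *ti, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bio->bi_opf & REQ_RAHEAD) {
			/* readahead of null bytes only wastes cache */
			return DM_MAPIO_KILL;
		}
		zero_fill_bio(bio);	/* satisfy the read in place */
		break;
	case REQ_OP_WRITE:
		/* writes are silently dropped */
		break;
	default:
		return DM_MAPIO_KILL;
	}

	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;	/* no request reaches a device */
}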
drivers/md/dm-zone.c
150
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
drivers/md/dm-zone.c
157
switch (bio_op(bio)) {
drivers/md/dm-zone.c
160
return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
drivers/md/dm-zone.c
464
void dm_zone_endio(struct dm_io *io, struct bio *clone)
drivers/md/dm-zone.c
468
struct bio *orig_bio = io->orig_bio;
drivers/md/dm-zoned-metadata.c
509
static void dmz_mblock_bio_end_io(struct bio *bio)
drivers/md/dm-zoned-metadata.c
511
struct dmz_mblock *mblk = bio->bi_private;
drivers/md/dm-zoned-metadata.c
514
if (bio->bi_status)
drivers/md/dm-zoned-metadata.c
517
if (bio_op(bio) == REQ_OP_WRITE)
drivers/md/dm-zoned-metadata.c
526
bio_put(bio);
drivers/md/dm-zoned-metadata.c
538
struct bio *bio;
drivers/md/dm-zoned-metadata.c
548
bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
drivers/md/dm-zoned-metadata.c
561
bio_put(bio);
drivers/md/dm-zoned-metadata.c
572
bio->bi_iter.bi_sector = dmz_blk2sect(block);
drivers/md/dm-zoned-metadata.c
573
bio->bi_private = mblk;
drivers/md/dm-zoned-metadata.c
574
bio->bi_end_io = dmz_mblock_bio_end_io;
drivers/md/dm-zoned-metadata.c
575
__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
drivers/md/dm-zoned-metadata.c
576
submit_bio(bio);
drivers/md/dm-zoned-metadata.c
713
struct bio *bio;
drivers/md/dm-zoned-metadata.c
718
bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
drivers/md/dm-zoned-metadata.c
723
bio->bi_iter.bi_sector = dmz_blk2sect(block);
drivers/md/dm-zoned-metadata.c
724
bio->bi_private = mblk;
drivers/md/dm-zoned-metadata.c
725
bio->bi_end_io = dmz_mblock_bio_end_io;
drivers/md/dm-zoned-metadata.c
726
__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
drivers/md/dm-zoned-metadata.c
727
submit_bio(bio);
drivers/md/dm-zoned-metadata.c
738
struct bio *bio;
drivers/md/dm-zoned-metadata.c
747
bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
drivers/md/dm-zoned-metadata.c
749
bio->bi_iter.bi_sector = dmz_blk2sect(block);
drivers/md/dm-zoned-metadata.c
750
__bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
drivers/md/dm-zoned-metadata.c
751
ret = submit_bio_wait(bio);
drivers/md/dm-zoned-metadata.c
752
bio_put(bio);
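The entries from line 738 onward show the stock synchronous one-block pattern: allocate, aim, attach the page, wait. A self-contained sketch, assuming a 4 KiB block and illustrative names:

static int sync_rdwr_block(struct block_device *bdev, enum req_op op,
			   sector_t sector, struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
			GFP_NOIO);
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, 4096, 0);	/* one full block */
	ret = submit_bio_wait(bio);		/* sleeps until done */
	bio_put(bio);
	return ret;
}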
drivers/md/dm-zoned-target.c
103
static void dmz_clone_endio(struct bio *clone)
drivers/md/dm-zoned-target.c
109
dmz_bio_endio(bioctx->bio, status);
drivers/md/dm-zoned-target.c
117
struct bio *bio, sector_t chunk_block,
drivers/md/dm-zoned-target.c
121
dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
drivers/md/dm-zoned-target.c
123
struct bio *clone;
drivers/md/dm-zoned-target.c
128
clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
drivers/md/dm-zoned-target.c
139
bio_advance(bio, clone->bi_iter.bi_size);
drivers/md/dm-zoned-target.c
144
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
drivers/md/dm-zoned-target.c
153
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
drivers/md/dm-zoned-target.c
159
swap(bio->bi_iter.bi_size, size);
drivers/md/dm-zoned-target.c
160
zero_fill_bio(bio);
drivers/md/dm-zoned-target.c
161
swap(bio->bi_iter.bi_size, size);
drivers/md/dm-zoned-target.c
163
bio_advance(bio, size);
drivers/md/dm-zoned-target.c
170
struct bio *bio)
drivers/md/dm-zoned-target.c
173
sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
drivers/md/dm-zoned-target.c
174
unsigned int nr_blocks = dmz_bio_blocks(bio);
drivers/md/dm-zoned-target.c
181
zero_fill_bio(bio);
drivers/md/dm-zoned-target.c
187
(unsigned long long)dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
22
struct bio *bio;
drivers/md/dm-zoned-target.c
229
ret = dmz_submit_bio(dmz, rzone, bio,
drivers/md/dm-zoned-target.c
236
dmz_handle_read_zero(dmz, bio, chunk_block, 1);
drivers/md/dm-zoned-target.c
250
struct dm_zone *zone, struct bio *bio,
drivers/md/dm-zoned-target.c
262
ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
283
struct dm_zone *zone, struct bio *bio,
drivers/md/dm-zoned-target.c
300
ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
319
struct bio *bio)
drivers/md/dm-zoned-target.c
322
sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
drivers/md/dm-zoned-target.c
323
unsigned int nr_blocks = dmz_bio_blocks(bio);
drivers/md/dm-zoned-target.c
330
(unsigned long long)dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
343
return dmz_handle_direct_write(dmz, zone, bio,
drivers/md/dm-zoned-target.c
351
return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
358
struct bio *bio)
drivers/md/dm-zoned-target.c
361
sector_t block = dmz_bio_block(bio);
drivers/md/dm-zoned-target.c
362
unsigned int nr_blocks = dmz_bio_blocks(bio);
drivers/md/dm-zoned-target.c
375
(unsigned long long)dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
396
struct bio *bio)
drivers/md/dm-zoned-target.c
399
dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
drivers/md/dm-zoned-target.c
411
zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
412
bio_op(bio));
drivers/md/dm-zoned-target.c
425
switch (bio_op(bio)) {
drivers/md/dm-zoned-target.c
427
ret = dmz_handle_read(dmz, zone, bio);
drivers/md/dm-zoned-target.c
430
ret = dmz_handle_write(dmz, zone, bio);
drivers/md/dm-zoned-target.c
434
ret = dmz_handle_discard(dmz, zone, bio);
drivers/md/dm-zoned-target.c
438
dmz_metadata_label(dmz->metadata), bio_op(bio));
drivers/md/dm-zoned-target.c
449
dmz_bio_endio(bio, errno_to_blk_status(ret));
drivers/md/dm-zoned-target.c
482
struct bio *bio;
drivers/md/dm-zoned-target.c
487
while ((bio = bio_list_pop(&cw->bio_list))) {
drivers/md/dm-zoned-target.c
489
dmz_handle_bio(dmz, cw, bio);
drivers/md/dm-zoned-target.c
506
struct bio *bio;
drivers/md/dm-zoned-target.c
518
bio = bio_list_pop(&dmz->flush_list);
drivers/md/dm-zoned-target.c
521
if (!bio)
drivers/md/dm-zoned-target.c
524
dmz_bio_endio(bio, errno_to_blk_status(ret));
drivers/md/dm-zoned-target.c
534
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
drivers/md/dm-zoned-target.c
536
unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
drivers/md/dm-zoned-target.c
567
bio_list_add(&cw->bio_list, bio);
drivers/md/dm-zoned-target.c
625
static int dmz_map(struct dm_target *ti, struct bio *bio)
drivers/md/dm-zoned-target.c
629
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
drivers/md/dm-zoned-target.c
630
sector_t sector = bio->bi_iter.bi_sector;
drivers/md/dm-zoned-target.c
631
unsigned int nr_sectors = bio_sectors(bio);
drivers/md/dm-zoned-target.c
640
bio_op(bio), (unsigned long long)sector, nr_sectors,
drivers/md/dm-zoned-target.c
641
(unsigned long long)dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
642
(unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
drivers/md/dm-zoned-target.c
643
(unsigned int)dmz_bio_blocks(bio));
drivers/md/dm-zoned-target.c
645
if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
drivers/md/dm-zoned-target.c
655
bioctx->bio = bio;
drivers/md/dm-zoned-target.c
659
if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
drivers/md/dm-zoned-target.c
661
bio_list_add(&dmz->flush_list, bio);
drivers/md/dm-zoned-target.c
670
dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
drivers/md/dm-zoned-target.c
673
ret = dmz_queue_chunk_work(dmz, bio);
drivers/md/dm-zoned-target.c
677
bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
75
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
drivers/md/dm-zoned-target.c
78
dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
drivers/md/dm-zoned-target.c
80
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
drivers/md/dm-zoned-target.c
81
bio->bi_status = status;
drivers/md/dm-zoned-target.c
82
if (bioctx->dev && bio->bi_status != BLK_STS_OK)
drivers/md/dm-zoned-target.c
89
if (bio->bi_status != BLK_STS_OK &&
drivers/md/dm-zoned-target.c
90
bio_op(bio) == REQ_OP_WRITE &&
drivers/md/dm-zoned-target.c
95
bio_endio(bio);
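dmz_bio_endio() (lines 75-95) applies the usual "first error wins" rule before completing: a later BLK_STS_OK must never mask an earlier failure. The same two lines recur in md_end_clone_io() and md_free_cloned_bio() further down:

	/* propagate status without overwriting an earlier error */
	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
	...
	bio_endio(bio);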
drivers/md/dm-zoned.h
45
#define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
drivers/md/dm-zoned.h
46
#define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
drivers/md/dm-zoned.h
83
#define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \
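With dm-zoned's 4 KiB metadata blocks, dmz_sect2blk() is a right shift by 3 (eight 512-byte sectors per block). A worked example for a 16 KiB bio starting at sector 24:

	dmz_bio_block(bio)  = 24 >> 3 = 3	/* first block touched */
	dmz_bio_blocks(bio) = 32 >> 3 = 4	/* 16 KiB = 32 sectors */

dmz_bio_chunk() applies the same idea at zone granularity, shifting bi_sector down by the per-zone sector-count shift.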
drivers/md/dm.c
100
if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
drivers/md/dm.c
101
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
drivers/md/dm.c
102
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
drivers/md/dm.c
106
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
drivers/md/dm.c
1078
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
drivers/md/dm.c
1080
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
drivers/md/dm.c
1083
static void clone_endio(struct bio *bio)
drivers/md/dm.c
1085
blk_status_t error = bio->bi_status;
drivers/md/dm.c
1086
struct dm_target_io *tio = clone_to_tio(bio);
drivers/md/dm.c
1093
if (bio_op(bio) == REQ_OP_DISCARD &&
drivers/md/dm.c
1094
!bdev_max_discard_sectors(bio->bi_bdev))
drivers/md/dm.c
1096
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
drivers/md/dm.c
1097
!bdev_write_zeroes_sectors(bio->bi_bdev))
drivers/md/dm.c
1102
unlikely(bdev_is_zoned(bio->bi_bdev)))
drivers/md/dm.c
1103
dm_zone_endio(io, bio);
drivers/md/dm.c
1106
int r = endio(ti, bio, &error);
drivers/md/dm.c
111
return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
drivers/md/dm.c
1116
if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
drivers/md/dm.c
113
return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
drivers/md/dm.c
1135
likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio)))
drivers/md/dm.c
1138
free_tio(bio);
drivers/md/dm.c
117
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
drivers/md/dm.c
119
return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
drivers/md/dm.c
1315
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
drivers/md/dm.c
1317
struct dm_target_io *tio = clone_to_tio(bio);
drivers/md/dm.c
1319
unsigned int bio_sectors = bio_sectors(bio);
drivers/md/dm.c
1324
BUG_ON(bio->bi_opf & REQ_ATOMIC);
drivers/md/dm.c
1327
unlikely(bdev_is_zoned(bio->bi_bdev))) {
drivers/md/dm.c
1328
enum req_op op = bio_op(bio);
drivers/md/dm.c
1337
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
drivers/md/dm.c
1358
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
drivers/md/dm.c
1397
static void __map_bio(struct bio *clone)
drivers/md/dm.c
1465
io->sector_offset = bio_sectors(ci->bio);
drivers/md/dm.c
1473
struct bio *bio;
drivers/md/dm.c
1482
bio = alloc_tio(ci, ti, bio_nr, len,
drivers/md/dm.c
1484
if (!bio)
drivers/md/dm.c
1487
bio_list_add(blist, bio);
drivers/md/dm.c
1494
while ((bio = bio_list_pop(blist)))
drivers/md/dm.c
1495
free_tio(bio);
drivers/md/dm.c
1503
struct bio *clone;
drivers/md/dm.c
1531
struct bio flush_bio;
drivers/md/dm.c
1545
ci->bio = &flush_bio;
drivers/md/dm.c
1572
struct bio *clone;
drivers/md/dm.c
1594
bio_uninit(ci->bio);
drivers/md/dm.c
1618
static bool is_abnormal_io(struct bio *bio)
drivers/md/dm.c
1620
switch (bio_op(bio)) {
drivers/md/dm.c
1643
switch (bio_op(ci->bio)) {
drivers/md/dm.c
1685
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
drivers/md/dm.c
1687
return (struct dm_io **)&bio->bi_private;
drivers/md/dm.c
1690
static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
drivers/md/dm.c
1692
struct dm_io **head = dm_poll_list_head(bio);
drivers/md/dm.c
1694
if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
drivers/md/dm.c
1695
bio->bi_opf |= REQ_DM_POLL_LIST;
drivers/md/dm.c
1700
io->data = bio->bi_private;
drivers/md/dm.c
1703
bio->bi_cookie = ~BLK_QC_T_NONE;
drivers/md/dm.c
1723
struct bio *clone;
drivers/md/dm.c
1738
ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
drivers/md/dm.c
1741
if (ci->bio->bi_opf & REQ_ATOMIC) {
drivers/md/dm.c
1750
if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
drivers/md/dm.c
1769
struct dm_table *map, struct bio *bio, bool is_abnormal)
drivers/md/dm.c
1773
ci->bio = bio;
drivers/md/dm.c
1776
ci->sector = bio->bi_iter.bi_sector;
drivers/md/dm.c
1777
ci->sector_count = bio_sectors(bio);
drivers/md/dm.c
1781
WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
drivers/md/dm.c
1786
static inline bool dm_zone_bio_needs_split(struct bio *bio)
drivers/md/dm.c
1791
switch (bio_op(bio)) {
drivers/md/dm.c
1807
return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
drivers/md/dm.c
1810
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
drivers/md/dm.c
1812
if (!bio_needs_zone_write_plugging(bio))
drivers/md/dm.c
1814
return blk_zone_plug_bio(bio, 0);
drivers/md/dm.c
1828
struct bio *clone;
drivers/md/dm.c
1930
static inline bool dm_zone_bio_needs_split(struct bio *bio)
drivers/md/dm.c
1934
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
drivers/md/dm.c
1948
struct dm_table *map, struct bio *bio)
drivers/md/dm.c
1955
is_abnormal = is_abnormal_io(bio);
drivers/md/dm.c
1957
need_split = is_abnormal || dm_zone_bio_needs_split(bio);
drivers/md/dm.c
1970
bio = bio_split_to_limits(bio);
drivers/md/dm.c
1971
if (!bio)
drivers/md/dm.c
1979
if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
drivers/md/dm.c
1983
if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
drivers/md/dm.c
1989
if (bio->bi_opf & REQ_PREFLUSH) {
drivers/md/dm.c
1990
bio_wouldblock_error(bio);
drivers/md/dm.c
1993
io = alloc_io(md, bio, GFP_NOWAIT);
drivers/md/dm.c
1996
bio_wouldblock_error(bio);
drivers/md/dm.c
2000
io = alloc_io(md, bio, GFP_NOIO);
drivers/md/dm.c
2002
init_clone_info(&ci, io, map, bio, is_abnormal);
drivers/md/dm.c
2004
if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
drivers/md/dm.c
2015
if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
drivers/md/dm.c
2020
if (bio->bi_iter.bi_size)
drivers/md/dm.c
2029
(bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
drivers/md/dm.c
2041
bio_trim(bio, io->sectors, ci.sector_count);
drivers/md/dm.c
2042
trace_block_split(bio, bio->bi_iter.bi_sector);
drivers/md/dm.c
2043
bio_inc_remaining(bio);
drivers/md/dm.c
2044
submit_bio_noacct(bio);
drivers/md/dm.c
2062
dm_queue_poll_io(bio, io);
drivers/md/dm.c
2065
static void dm_submit_bio(struct bio *bio)
drivers/md/dm.c
2067
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
drivers/md/dm.c
2075
bio_io_error(bio);
drivers/md/dm.c
2081
if (bio->bi_opf & REQ_NOWAIT)
drivers/md/dm.c
2082
bio_wouldblock_error(bio);
drivers/md/dm.c
2083
else if (bio->bi_opf & REQ_RAHEAD)
drivers/md/dm.c
2084
bio_io_error(bio);
drivers/md/dm.c
2086
queue_io(md, bio);
drivers/md/dm.c
2090
dm_split_and_process_bio(md, map, bio);
drivers/md/dm.c
2108
static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
drivers/md/dm.c
2111
struct dm_io **head = dm_poll_list_head(bio);
drivers/md/dm.c
2117
if (!(bio->bi_opf & REQ_DM_POLL_LIST))
drivers/md/dm.c
2130
bio->bi_opf &= ~REQ_DM_POLL_LIST;
drivers/md/dm.c
2131
bio->bi_private = list->data;
drivers/md/dm.c
2149
bio->bi_opf |= REQ_DM_POLL_LIST;
drivers/md/dm.c
2831
struct bio *bio;
drivers/md/dm.c
2835
bio = bio_list_pop(&md->deferred);
drivers/md/dm.c
2838
if (!bio)
drivers/md/dm.c
2841
submit_bio_noacct(bio);
drivers/md/dm.c
487
u64 dm_start_time_ns_from_clone(struct bio *bio)
drivers/md/dm.c
489
return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
drivers/md/dm.c
493
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
drivers/md/dm.c
503
return bio_sectors(bio);
drivers/md/dm.c
508
struct bio *bio = io->orig_bio;
drivers/md/dm.c
512
bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
drivers/md/dm.c
515
bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
drivers/md/dm.c
516
dm_io_sectors(io, bio),
drivers/md/dm.c
525
sector = bio_end_sector(bio) - io->sector_offset;
drivers/md/dm.c
527
sector = bio->bi_iter.bi_sector;
drivers/md/dm.c
529
dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
drivers/md/dm.c
530
sector, dm_io_sectors(io, bio),
drivers/md/dm.c
540
static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
drivers/md/dm.c
571
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
drivers/md/dm.c
575
struct bio *clone;
drivers/md/dm.c
577
clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
drivers/md/dm.c
593
io->orig_bio = bio;
drivers/md/dm.c
613
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
drivers/md/dm.c
618
struct bio *clone;
drivers/md/dm.c
626
clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
drivers/md/dm.c
659
static void free_tio(struct bio *clone)
drivers/md/dm.c
669
static void queue_io(struct mapped_device *md, struct bio *bio)
drivers/md/dm.c
674
bio_list_add(&md->deferred, bio);
drivers/md/dm.c
85
struct bio *bio;
drivers/md/dm.c
896
struct bio *bio = io->orig_bio;
drivers/md/dm.c
899
(bio->bi_opf & REQ_POLLED));
drivers/md/dm.c
906
if (bio->bi_opf & REQ_POLLED) {
drivers/md/dm.c
912
bio_clear_polled(bio);
drivers/md/dm.c
921
!WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
drivers/md/dm.c
93
static inline struct dm_target_io *clone_to_tio(struct bio *clone)
drivers/md/dm.c
943
struct bio *bio = io->orig_bio;
drivers/md/dm.c
98
void *dm_per_bio_data(struct bio *bio, size_t data_size)
drivers/md/dm.c
982
bio->bi_opf &= ~REQ_PREFLUSH;
drivers/md/dm.c
983
queue_io(md, bio);
drivers/md/dm.c
987
bio->bi_status = io_error;
drivers/md/dm.c
988
bio_endio(bio);
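dm_per_bio_data() and dm_bio_from_per_bio_data() (lines 93-113) are exact inverses: each just adds or subtracts the same fixed offset around the clone bio. A sketch of the round trip inside a target, assuming the target set ti->per_io_data_size to sizeof(struct my_per_bio) at ctr time (all names here are illustrative):

struct my_per_bio {
	sector_t saved_sector;		/* illustrative payload */
};

	/* in ->map(): clone bio -> payload */
	struct my_per_bio *pb = dm_per_bio_data(bio,
					sizeof(struct my_per_bio));
	pb->saved_sector = bio->bi_iter.bi_sector;

	/* later (e.g. in a worker): payload -> clone bio, as the
	 * dm-verity and dm-zoned call sites above do */
	struct bio *clone = dm_bio_from_per_bio_data(pb,
					sizeof(struct my_per_bio));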
drivers/md/dm.h
109
void dm_zone_endio(struct dm_io *io, struct bio *clone);
drivers/md/dm.h
114
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
drivers/md/dm.h
121
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
drivers/md/dm.h
194
int linear_map(struct dm_target *ti, struct bio *bio);
drivers/md/dm.h
198
int stripe_map(struct dm_target *ti, struct bio *bio);
drivers/md/md-linear.c
235
static bool linear_make_request(struct mddev *mddev, struct bio *bio)
drivers/md/md-linear.c
239
sector_t bio_sector = bio->bi_iter.bi_sector;
drivers/md/md-linear.c
241
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
drivers/md/md-linear.c
242
&& md_flush_request(mddev, bio))
drivers/md/md-linear.c
256
bio_io_error(bio);
drivers/md/md-linear.c
260
if (unlikely(bio_end_sector(bio) > end_sector)) {
drivers/md/md-linear.c
262
bio = bio_submit_split_bioset(bio, end_sector - bio_sector,
drivers/md/md-linear.c
264
if (!bio)
drivers/md/md-linear.c
268
md_account_bio(mddev, &bio);
drivers/md/md-linear.c
269
bio_set_dev(bio, tmp_dev->rdev->bdev);
drivers/md/md-linear.c
270
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
drivers/md/md-linear.c
273
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
drivers/md/md-linear.c
274
!bdev_max_discard_sectors(bio->bi_bdev))) {
drivers/md/md-linear.c
276
bio_endio(bio);
drivers/md/md-linear.c
279
trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
drivers/md/md-linear.c
281
mddev_check_write_zeroes(mddev, bio);
drivers/md/md-linear.c
282
submit_bio_noacct(bio);
drivers/md/md-linear.c
289
(unsigned long long)bio->bi_iter.bi_sector,
drivers/md/md-linear.c
293
bio_io_error(bio);
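The heart of linear_make_request() (lines 268-282) is the three-step remap every concatenation/striping personality performs: retarget, rebase, resubmit. raid0_map_submit_bio() below does the same with a zone offset added. Schematically, with start_sector standing in for the device-lookup result:

	/* retarget the bio at the member disk ... */
	bio_set_dev(bio, tmp_dev->rdev->bdev);
	/* ... rebase from array-relative to device-relative sectors ... */
	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector
		+ tmp_dev->rdev->data_offset;
	/* ... and hand it back to the block layer */
	submit_bio_noacct(bio);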
drivers/md/md.c
1085
static void super_written(struct bio *bio)
drivers/md/md.c
1087
struct md_rdev *rdev = bio->bi_private;
drivers/md/md.c
1090
if (bio->bi_status) {
drivers/md/md.c
1092
blk_status_to_errno(bio->bi_status));
drivers/md/md.c
1095
&& (bio->bi_opf & MD_FAILFAST)) {
drivers/md/md.c
1102
bio_put(bio);
drivers/md/md.c
1130
struct bio *bio;
drivers/md/md.c
1138
bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
drivers/md/md.c
1146
bio->bi_iter.bi_sector = sector;
drivers/md/md.c
1147
__bio_add_page(bio, page, size, offset);
drivers/md/md.c
1148
bio->bi_private = rdev;
drivers/md/md.c
1149
bio->bi_end_io = super_written;
drivers/md/md.c
1154
bio->bi_opf |= MD_FAILFAST;
drivers/md/md.c
1157
submit_bio(bio);
drivers/md/md.c
1172
struct bio bio;
drivers/md/md.c
1176
bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
drivers/md/md.c
1178
bio_init(&bio, rdev->bdev, &bvec, 1, opf);
drivers/md/md.c
1181
bio.bi_iter.bi_sector = sector + rdev->sb_start;
drivers/md/md.c
1185
bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
drivers/md/md.c
1187
bio.bi_iter.bi_sector = sector + rdev->data_offset;
drivers/md/md.c
1188
__bio_add_page(&bio, page, size, 0);
drivers/md/md.c
1190
submit_bio_wait(&bio);
drivers/md/md.c
1192
return !bio.bi_status;
drivers/md/md.c
382
static bool is_suspended(struct mddev *mddev, struct bio *bio)
drivers/md/md.c
386
if (bio_data_dir(bio) != WRITE)
drivers/md/md.c
390
if (bio->bi_iter.bi_sector >= READ_ONCE(mddev->suspend_hi))
drivers/md/md.c
392
if (bio_end_sector(bio) < READ_ONCE(mddev->suspend_lo))
drivers/md/md.c
397
bool md_handle_request(struct mddev *mddev, struct bio *bio)
drivers/md/md.c
400
if (is_suspended(mddev, bio)) {
drivers/md/md.c
403
if (bio->bi_opf & REQ_NOWAIT) {
drivers/md/md.c
404
bio_wouldblock_error(bio);
drivers/md/md.c
410
if (!is_suspended(mddev, bio))
drivers/md/md.c
419
if (!mddev->pers->make_request(mddev, bio)) {
drivers/md/md.c
431
static void md_submit_bio(struct bio *bio)
drivers/md/md.c
433
const int rw = bio_data_dir(bio);
drivers/md/md.c
434
struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
drivers/md/md.c
437
bio_io_error(bio);
drivers/md/md.c
442
bio_io_error(bio);
drivers/md/md.c
446
bio = bio_split_to_limits(bio);
drivers/md/md.c
447
if (!bio)
drivers/md/md.c
451
if (bio_sectors(bio) != 0)
drivers/md/md.c
452
bio->bi_status = BLK_STS_IOERR;
drivers/md/md.c
453
bio_endio(bio);
drivers/md/md.c
458
bio->bi_opf &= ~REQ_NOMERGE;
drivers/md/md.c
460
md_handle_request(mddev, bio);
drivers/md/md.c
572
static void md_end_flush(struct bio *bio)
drivers/md/md.c
574
struct bio *parent = bio->bi_private;
drivers/md/md.c
580
if (bio->bi_status)
drivers/md/md.c
581
pr_err("md: %pg flush io error %d\n", bio->bi_bdev,
drivers/md/md.c
582
blk_status_to_errno(bio->bi_status));
drivers/md/md.c
584
bio_put(bio);
drivers/md/md.c
588
bool md_flush_request(struct mddev *mddev, struct bio *bio)
drivers/md/md.c
591
struct bio *new;
drivers/md/md.c
607
new->bi_private = bio;
drivers/md/md.c
609
bio_inc_remaining(bio);
drivers/md/md.c
613
if (bio_sectors(bio) == 0) {
drivers/md/md.c
614
bio_endio(bio);
drivers/md/md.c
618
bio->bi_opf &= ~REQ_PREFLUSH;
drivers/md/md.c
9099
void md_write_start(struct mddev *mddev, struct bio *bi)
drivers/md/md.c
9150
void md_write_inc(struct mddev *mddev, struct bio *bi)
drivers/md/md.c
9178
struct bio *bio, sector_t start, sector_t size)
drivers/md/md.c
9180
struct bio *discard_bio = NULL;
drivers/md/md.c
9186
bio_chain(discard_bio, bio);
drivers/md/md.c
9187
bio_clone_blkg_association(discard_bio, bio);
drivers/md/md.c
9188
mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector);
drivers/md/md.c
9216
static void md_end_clone_io(struct bio *bio)
drivers/md/md.c
9218
struct md_io_clone *md_io_clone = bio->bi_private;
drivers/md/md.c
9219
struct bio *orig_bio = md_io_clone->orig_bio;
drivers/md/md.c
9225
if (bio->bi_status && !orig_bio->bi_status)
drivers/md/md.c
9226
orig_bio->bi_status = bio->bi_status;
drivers/md/md.c
9231
bio_put(bio);
drivers/md/md.c
9236
static void md_clone_bio(struct mddev *mddev, struct bio **bio)
drivers/md/md.c
9238
struct block_device *bdev = (*bio)->bi_bdev;
drivers/md/md.c
9240
struct bio *clone =
drivers/md/md.c
9241
bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);
drivers/md/md.c
9244
md_io_clone->orig_bio = *bio;
drivers/md/md.c
9247
md_io_clone->start_time = bio_start_io_acct(*bio);
drivers/md/md.c
9249
if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) {
drivers/md/md.c
9250
md_io_clone->offset = (*bio)->bi_iter.bi_sector;
drivers/md/md.c
9251
md_io_clone->sectors = bio_sectors(*bio);
drivers/md/md.c
9252
md_io_clone->rw = op_stat_group(bio_op(*bio));
drivers/md/md.c
9258
*bio = clone;
drivers/md/md.c
9261
void md_account_bio(struct mddev *mddev, struct bio **bio)
drivers/md/md.c
9264
md_clone_bio(mddev, bio);
drivers/md/md.c
9268
void md_free_cloned_bio(struct bio *bio)
drivers/md/md.c
9270
struct md_io_clone *md_io_clone = bio->bi_private;
drivers/md/md.c
9271
struct bio *orig_bio = md_io_clone->orig_bio;
drivers/md/md.c
9277
if (bio->bi_status && !orig_bio->bi_status)
drivers/md/md.c
9278
orig_bio->bi_status = bio->bi_status;
drivers/md/md.c
9283
bio_put(bio);
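md_flush_request() (lines 588-618) fans one PREFLUSH out to every member device while keeping the caller's bio alive through remaining-count accounting: each child takes a reference with bio_inc_remaining(), and the child's completion drops it. The skeleton of that pattern, with hypothetical names and the per-rdev loop elided:

static void child_end_flush(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	bio_put(bio);
	bio_endio(parent);	/* drops one 'remaining' reference */
}

	/* for each member device: */
	new = bio_alloc(rdev->bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_NOIO);
	new->bi_private = parent;
	new->bi_end_io = child_end_flush;
	bio_inc_remaining(parent);	/* parent finishes after all children */
	submit_bio(new);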
drivers/md/md.h
1054
static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
drivers/md/md.h
1058
trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
drivers/md/md.h
750
bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
drivers/md/md.h
875
struct bio *orig_bio;
drivers/md/md.h
880
struct bio bio_clone;
drivers/md/md.h
910
extern void md_write_start(struct mddev *mddev, struct bio *bi);
drivers/md/md.h
911
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
drivers/md/md.h
918
struct bio *bio, sector_t start, sector_t size);
drivers/md/md.h
919
void md_account_bio(struct mddev *mddev, struct bio **bio);
drivers/md/md.h
920
void md_free_cloned_bio(struct bio *bio);
drivers/md/md.h
922
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
drivers/md/md.h
950
extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
drivers/md/md.h
990
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
drivers/md/md.h
992
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
drivers/md/md.h
993
!bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
drivers/md/raid0.c
454
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
drivers/md/raid0.c
458
sector_t start = bio->bi_iter.bi_sector;
drivers/md/raid0.c
472
if (bio_end_sector(bio) > zone->zone_end) {
drivers/md/raid0.c
473
bio = bio_submit_split_bioset(bio,
drivers/md/raid0.c
474
zone->zone_end - bio->bi_iter.bi_sector,
drivers/md/raid0.c
476
if (!bio)
drivers/md/raid0.c
481
end = bio_end_sector(bio);
drivers/md/raid0.c
548
md_submit_discard_bio(mddev, rdev, bio,
drivers/md/raid0.c
552
bio_endio(bio);
drivers/md/raid0.c
555
static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
drivers/md/raid0.c
560
sector_t bio_sector = bio->bi_iter.bi_sector;
drivers/md/raid0.c
563
md_account_bio(mddev, &bio);
drivers/md/raid0.c
575
bio_io_error(bio);
drivers/md/raid0.c
580
bio_io_error(bio);
drivers/md/raid0.c
585
bio_set_dev(bio, tmp_dev->bdev);
drivers/md/raid0.c
586
bio->bi_iter.bi_sector = sector + zone->dev_start +
drivers/md/raid0.c
588
mddev_trace_remap(mddev, bio, bio_sector);
drivers/md/raid0.c
589
mddev_check_write_zeroes(mddev, bio);
drivers/md/raid0.c
590
submit_bio_noacct(bio);
drivers/md/raid0.c
593
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
drivers/md/raid0.c
599
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
drivers/md/raid0.c
600
&& md_flush_request(mddev, bio))
drivers/md/raid0.c
603
if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
drivers/md/raid0.c
604
raid0_handle_discard(mddev, bio);
drivers/md/raid0.c
608
sector = bio->bi_iter.bi_sector;
drivers/md/raid0.c
616
if (sectors < bio_sectors(bio)) {
drivers/md/raid0.c
617
bio = bio_submit_split_bioset(bio, sectors,
drivers/md/raid0.c
619
if (!bio)
drivers/md/raid0.c
623
raid0_map_submit_bio(mddev, bio);
drivers/md/raid1-10.c
101
if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
drivers/md/raid1-10.c
102
bio->bi_status = BLK_STS_RESOURCE;
drivers/md/raid1-10.c
103
bio_endio(bio);
drivers/md/raid1-10.c
11
#define IO_BLOCKED ((struct bio *)1)
drivers/md/raid1-10.c
112
static inline void raid1_submit_write(struct bio *bio)
drivers/md/raid1-10.c
114
struct md_rdev *rdev = (void *)bio->bi_bdev;
drivers/md/raid1-10.c
116
bio->bi_next = NULL;
drivers/md/raid1-10.c
117
bio_set_dev(bio, rdev->bdev);
drivers/md/raid1-10.c
119
bio_io_error(bio);
drivers/md/raid1-10.c
120
else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
drivers/md/raid1-10.c
121
!bdev_max_discard_sectors(bio->bi_bdev)))
drivers/md/raid1-10.c
123
bio_endio(bio);
drivers/md/raid1-10.c
125
submit_bio_noacct(bio);
drivers/md/raid1-10.c
128
static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
drivers/md/raid1-10.c
139
raid1_submit_write(bio);
drivers/md/raid1-10.c
148
bio_list_add(&plug->pending, bio);
drivers/md/raid1-10.c
16
#define IO_MADE_GOOD ((struct bio *)2)
drivers/md/raid1-10.c
18
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
drivers/md/raid1-10.c
297
static inline bool raid1_should_handle_error(struct bio *bio)
drivers/md/raid1-10.c
299
return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT));
drivers/md/raid1-10.c
85
static inline struct resync_pages *get_resync_pages(struct bio *bio)
drivers/md/raid1-10.c
87
return bio->bi_private;
drivers/md/raid1-10.c
91
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
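get_resync_pages() shows that the resync machinery parks its page bundle in bi_private. Since bio_reset() wipes that field, the reset sites bracket it by hand, exactly as raid1.c:2745-2747 and raid10.c:3066-3068 below do:

	struct resync_pages *rp = bio->bi_private;

	bio_reset(bio, NULL, 0);	/* clears bi_private too */
	bio->bi_private = rp;		/* re-attach the page bundle */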
drivers/md/raid1.c
1220
struct bio *bio)
drivers/md/raid1.c
1222
int size = bio->bi_iter.bi_size;
drivers/md/raid1.c
1225
struct bio *behind_bio = NULL;
drivers/md/raid1.c
1227
behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
drivers/md/raid1.c
1231
if (!bio_has_data(bio)) {
drivers/md/raid1.c
125
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
drivers/md/raid1.c
1253
bio_copy_data(behind_bio, bio);
drivers/md/raid1.c
1262
bio->bi_iter.bi_size);
drivers/md/raid1.c
127
return get_resync_pages(bio)->raid_bio;
drivers/md/raid1.c
1273
struct bio *bio;
drivers/md/raid1.c
1286
bio = bio_list_get(&plug->pending);
drivers/md/raid1.c
1287
flush_bio_list(conf, bio);
drivers/md/raid1.c
1291
static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
drivers/md/raid1.c
1293
r1_bio->master_bio = bio;
drivers/md/raid1.c
1294
r1_bio->sectors = bio_sectors(bio);
drivers/md/raid1.c
1297
r1_bio->sector = bio->bi_iter.bi_sector;
drivers/md/raid1.c
1301
alloc_r1bio(struct mddev *mddev, struct bio *bio)
drivers/md/raid1.c
1308
init_r1bio(r1_bio, mddev, bio);
drivers/md/raid1.c
1312
static void raid1_read_request(struct mddev *mddev, struct bio *bio,
drivers/md/raid1.c
1317
struct bio *read_bio;
drivers/md/raid1.c
1333
if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
drivers/md/raid1.c
1334
bio->bi_opf & REQ_NOWAIT)) {
drivers/md/raid1.c
1335
bio_wouldblock_error(bio);
drivers/md/raid1.c
1340
r1_bio = alloc_r1bio(mddev, bio);
drivers/md/raid1.c
1342
init_r1bio(r1_bio, mddev, bio);
drivers/md/raid1.c
1378
if (max_sectors < bio_sectors(bio)) {
drivers/md/raid1.c
1379
bio = bio_submit_split_bioset(bio, max_sectors,
drivers/md/raid1.c
1381
if (!bio) {
drivers/md/raid1.c
1386
r1_bio->master_bio = bio;
drivers/md/raid1.c
1392
md_account_bio(mddev, &bio);
drivers/md/raid1.c
1393
r1_bio->master_bio = bio;
drivers/md/raid1.c
1395
read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
drivers/md/raid1.c
1416
static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
drivers/md/raid1.c
1431
rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
drivers/md/raid1.c
1432
bio_sectors(bio)) < 0)
drivers/md/raid1.c
1436
if (bio->bi_opf & REQ_NOWAIT)
drivers/md/raid1.c
1451
struct bio *bio)
drivers/md/raid1.c
1467
alloc_behind_master_bio(r1_bio, bio);
drivers/md/raid1.c
1474
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
drivers/md/raid1.c
1484
bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
drivers/md/raid1.c
1488
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
drivers/md/raid1.c
149
struct bio *bio;
drivers/md/raid1.c
1491
if (bio->bi_opf & REQ_NOWAIT) {
drivers/md/raid1.c
1492
bio_wouldblock_error(bio);
drivers/md/raid1.c
1499
bio->bi_iter.bi_sector,
drivers/md/raid1.c
1500
bio_end_sector(bio)))
drivers/md/raid1.c
1512
if (!wait_barrier(conf, bio->bi_iter.bi_sector,
drivers/md/raid1.c
1513
bio->bi_opf & REQ_NOWAIT)) {
drivers/md/raid1.c
1514
bio_wouldblock_error(bio);
drivers/md/raid1.c
1518
if (!wait_blocked_rdev(mddev, bio)) {
drivers/md/raid1.c
1519
bio_wouldblock_error(bio);
drivers/md/raid1.c
1523
r1_bio = alloc_r1bio(mddev, bio);
drivers/md/raid1.c
1583
if (bio->bi_opf & REQ_ATOMIC)
drivers/md/raid1.c
1591
r1_bio->bios[i] = bio;
drivers/md/raid1.c
1603
if (max_sectors < bio_sectors(bio)) {
drivers/md/raid1.c
1604
bio = bio_submit_split_bioset(bio, max_sectors,
drivers/md/raid1.c
1606
if (!bio) {
drivers/md/raid1.c
1611
r1_bio->master_bio = bio;
drivers/md/raid1.c
1615
md_account_bio(mddev, &bio);
drivers/md/raid1.c
1616
r1_bio->master_bio = bio;
drivers/md/raid1.c
1623
struct bio *mbio = NULL;
drivers/md/raid1.c
1630
raid1_start_write_behind(mddev, r1_bio, bio);
drivers/md/raid1.c
1643
mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
drivers/md/raid1.c
166
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
drivers/md/raid1.c
167
if (!bio)
drivers/md/raid1.c
1689
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
drivers/md/raid1.c
169
bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
drivers/md/raid1.c
1693
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
drivers/md/raid1.c
1694
&& md_flush_request(mddev, bio))
drivers/md/raid1.c
170
r1_bio->bios[j] = bio;
drivers/md/raid1.c
1705
bio->bi_iter.bi_sector, bio_sectors(bio));
drivers/md/raid1.c
1707
if (bio_data_dir(bio) == READ)
drivers/md/raid1.c
1708
raid1_read_request(mddev, bio, sectors, NULL);
drivers/md/raid1.c
1710
md_write_start(mddev, bio);
drivers/md/raid1.c
1711
raid1_write_request(mddev, bio, sectors);
drivers/md/raid1.c
185
bio = r1_bio->bios[j];
drivers/md/raid1.c
196
bio->bi_private = rp;
drivers/md/raid1.c
2027
static void end_sync_read(struct bio *bio)
drivers/md/raid1.c
2029
struct r1bio *r1_bio = get_resync_r1bio(bio);
drivers/md/raid1.c
2038
if (!bio->bi_status)
drivers/md/raid1.c
2075
static void end_sync_write(struct bio *bio)
drivers/md/raid1.c
2077
struct r1bio *r1_bio = get_resync_r1bio(bio);
drivers/md/raid1.c
2080
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
drivers/md/raid1.c
2082
if (bio->bi_status) {
drivers/md/raid1.c
2131
struct bio *bio = r1_bio->bios[r1_bio->read_disk];
drivers/md/raid1.c
2132
struct page **pages = get_resync_pages(bio)->pages;
drivers/md/raid1.c
2147
bio->bi_end_io = end_sync_write;
drivers/md/raid1.c
2185
mdname(mddev), bio->bi_bdev,
drivers/md/raid1.c
2238
bio->bi_status = 0;
drivers/md/raid1.c
2261
struct bio *b = r1_bio->bios[i];
drivers/md/raid1.c
2288
struct bio *pbio = r1_bio->bios[primary];
drivers/md/raid1.c
2289
struct bio *sbio = r1_bio->bios[i];
drivers/md/raid1.c
2333
struct bio *wbio;
drivers/md/raid1.c
244
struct bio **bio = r1_bio->bios + i;
drivers/md/raid1.c
245
if (!BIO_SPECIAL(*bio))
drivers/md/raid1.c
246
bio_put(*bio);
drivers/md/raid1.c
247
*bio = NULL;
drivers/md/raid1.c
2513
struct bio *wbio;
drivers/md/raid1.c
2557
struct bio *bio = r1_bio->bios[m];
drivers/md/raid1.c
2558
if (bio->bi_end_io == NULL)
drivers/md/raid1.c
2560
if (!bio->bi_status &&
drivers/md/raid1.c
2563
if (bio->bi_status &&
drivers/md/raid1.c
2615
struct bio *bio;
drivers/md/raid1.c
2629
bio = r1_bio->bios[r1_bio->read_disk];
drivers/md/raid1.c
2630
bio_put(bio);
drivers/md/raid1.c
2647
bio = r1_bio->master_bio;
drivers/md/raid1.c
2651
raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
drivers/md/raid1.c
266
struct bio *bio = r1_bio->bios[i];
drivers/md/raid1.c
267
if (bio->bi_end_io)
drivers/md/raid1.c
2740
struct bio *bio;
drivers/md/raid1.c
2744
bio = r1bio->bios[i];
drivers/md/raid1.c
2745
rps = bio->bi_private;
drivers/md/raid1.c
2746
bio_reset(bio, NULL, 0);
drivers/md/raid1.c
2747
bio->bi_private = rps;
drivers/md/raid1.c
2768
struct bio *bio;
drivers/md/raid1.c
2864
bio = r1_bio->bios[i];
drivers/md/raid1.c
2872
bio->bi_opf = REQ_OP_WRITE;
drivers/md/raid1.c
2873
bio->bi_end_io = end_sync_write;
drivers/md/raid1.c
2899
bio->bi_opf = REQ_OP_READ;
drivers/md/raid1.c
2900
bio->bi_end_io = end_sync_read;
drivers/md/raid1.c
2911
bio->bi_opf = REQ_OP_WRITE;
drivers/md/raid1.c
2912
bio->bi_end_io = end_sync_write;
drivers/md/raid1.c
2916
if (rdev && bio->bi_end_io) {
drivers/md/raid1.c
2918
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
drivers/md/raid1.c
2919
bio_set_dev(bio, rdev->bdev);
drivers/md/raid1.c
2921
bio->bi_opf |= MD_FAILFAST;
drivers/md/raid1.c
300
struct bio *bio = r1_bio->master_bio;
drivers/md/raid1.c
3002
bio = r1_bio->bios[i];
drivers/md/raid1.c
3003
rp = get_resync_pages(bio);
drivers/md/raid1.c
3004
if (bio->bi_end_io) {
drivers/md/raid1.c
3011
__bio_add_page(bio, page, len, 0);
drivers/md/raid1.c
303
bio->bi_status = BLK_STS_IOERR;
drivers/md/raid1.c
3037
bio = r1_bio->bios[i];
drivers/md/raid1.c
3038
if (bio->bi_end_io == end_sync_read) {
drivers/md/raid1.c
3041
bio->bi_opf &= ~MD_FAILFAST;
drivers/md/raid1.c
3042
submit_bio_noacct(bio);
drivers/md/raid1.c
3047
bio = r1_bio->bios[r1_bio->read_disk];
drivers/md/raid1.c
3049
bio->bi_opf &= ~MD_FAILFAST;
drivers/md/raid1.c
305
bio_endio(bio);
drivers/md/raid1.c
3050
submit_bio_noacct(bio);
drivers/md/raid1.c
310
struct bio *bio = r1_bio->master_bio;
drivers/md/raid1.c
317
(bio_data_dir(bio) == WRITE) ? "write" : "read",
drivers/md/raid1.c
318
(unsigned long long) bio->bi_iter.bi_sector,
drivers/md/raid1.c
319
(unsigned long long) bio_end_sector(bio) - 1);
drivers/md/raid1.c
346
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
drivers/md/raid1.c
353
if (r1_bio->bios[mirror] == bio)
drivers/md/raid1.c
362
static void raid1_end_read_request(struct bio *bio)
drivers/md/raid1.c
364
int uptodate = !bio->bi_status;
drivers/md/raid1.c
365
struct r1bio *r1_bio = bio->bi_private;
drivers/md/raid1.c
381
} else if (!raid1_should_handle_error(bio)) {
drivers/md/raid1.c
446
static void raid1_end_write_request(struct bio *bio)
drivers/md/raid1.c
448
struct r1bio *r1_bio = bio->bi_private;
drivers/md/raid1.c
451
struct bio *to_put = NULL;
drivers/md/raid1.c
452
int mirror = find_bio_disk(r1_bio, bio);
drivers/md/raid1.c
456
bool ignore_error = !raid1_should_handle_error(bio) ||
drivers/md/raid1.c
457
(bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
drivers/md/raid1.c
462
if (bio->bi_status && !ignore_error) {
drivers/md/raid1.c
469
(bio->bi_opf & MD_FAILFAST) &&
drivers/md/raid1.c
484
to_put = bio;
drivers/md/raid1.c
498
to_put = bio;
drivers/md/raid1.c
536
struct bio *mbio = r1_bio->master_bio;
drivers/md/raid1.c
899
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
drivers/md/raid1.c
905
while (bio) { /* submit pending writes */
drivers/md/raid1.c
906
struct bio *next = bio->bi_next;
drivers/md/raid1.c
908
raid1_submit_write(bio);
drivers/md/raid1.c
909
bio = next;
drivers/md/raid1.c
923
struct bio *bio;
drivers/md/raid1.c
925
bio = bio_list_get(&conf->pending_bio_list);
drivers/md/raid1.c
939
flush_bio_list(conf, bio);
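flush_bio_list() (lines 899-909) drains a chain threaded on bi_next: bio_list_get() detaches the entire pending list in O(1), then each bio's bi_next is cleared before it is handed down (raid1_submit_write() at raid1-10.c:116 does the clearing). Schematically:

	struct bio *bio = bio_list_get(&conf->pending_bio_list);

	while (bio) {			/* submit pending writes */
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;	/* detach before resubmission */
		submit_bio_noacct(bio);
		bio = next;
	}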
drivers/md/raid1.h
140
struct bio *master_bio;
drivers/md/raid1.h
152
struct bio *behind_master_bio;
drivers/md/raid1.h
158
struct bio *bios[];
drivers/md/raid10.c
101
static inline struct r10bio *get_resync_r10bio(struct bio *bio)
drivers/md/raid10.c
103
return get_resync_pages(bio)->raid_bio;
drivers/md/raid10.c
1089
struct bio *bio;
drivers/md/raid10.c
1102
bio = bio_list_get(&plug->pending);
drivers/md/raid10.c
1106
while (bio) { /* submit pending writes */
drivers/md/raid10.c
1107
struct bio *next = bio->bi_next;
drivers/md/raid10.c
1109
raid1_submit_write(bio);
drivers/md/raid10.c
1110
bio = next;
drivers/md/raid10.c
1123
struct bio *bio, sector_t sectors)
drivers/md/raid10.c
1126
if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
drivers/md/raid10.c
1127
bio_wouldblock_error(bio);
drivers/md/raid10.c
1131
bio->bi_iter.bi_sector < conf->reshape_progress &&
drivers/md/raid10.c
1132
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
drivers/md/raid10.c
1134
if (bio->bi_opf & REQ_NOWAIT) {
drivers/md/raid10.c
1135
bio_wouldblock_error(bio);
drivers/md/raid10.c
1140
conf->reshape_progress <= bio->bi_iter.bi_sector ||
drivers/md/raid10.c
1141
conf->reshape_progress >= bio->bi_iter.bi_sector +
drivers/md/raid10.c
1148
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
drivers/md/raid10.c
1152
struct bio *read_bio;
drivers/md/raid10.c
1186
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
drivers/md/raid10.c
1206
if (max_sectors < bio_sectors(bio)) {
drivers/md/raid10.c
1208
bio = bio_submit_split_bioset(bio, max_sectors,
drivers/md/raid10.c
1211
if (!bio) {
drivers/md/raid10.c
1216
r10_bio->master_bio = bio;
drivers/md/raid10.c
1222
md_account_bio(mddev, &bio);
drivers/md/raid10.c
1223
r10_bio->master_bio = bio;
drivers/md/raid10.c
1225
read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
drivers/md/raid10.c
1228
r10_bio->devs[slot].bio = read_bio;
drivers/md/raid10.c
1247
struct bio *bio, bool replacement,
drivers/md/raid10.c
1254
struct bio *mbio;
drivers/md/raid10.c
1259
mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
drivers/md/raid10.c
1264
r10_bio->devs[n_copy].bio = mbio;
drivers/md/raid10.c
1344
static void raid10_write_request(struct mddev *mddev, struct bio *bio,
drivers/md/raid10.c
135
struct bio *bio;
drivers/md/raid10.c
1354
bio->bi_iter.bi_sector,
drivers/md/raid10.c
1355
bio_end_sector(bio)))) {
drivers/md/raid10.c
1358
if (bio->bi_opf & REQ_NOWAIT) {
drivers/md/raid10.c
1359
bio_wouldblock_error(bio);
drivers/md/raid10.c
1366
bio->bi_iter.bi_sector, bio_end_sector(bio)))
drivers/md/raid10.c
1374
if (!regular_request_wait(mddev, conf, bio, sectors)) {
drivers/md/raid10.c
1381
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
drivers/md/raid10.c
1382
bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
drivers/md/raid10.c
1383
: (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
drivers/md/raid10.c
1384
bio->bi_iter.bi_sector < conf->reshape_progress))) {
drivers/md/raid10.c
1390
if (bio->bi_opf & REQ_NOWAIT) {
drivers/md/raid10.c
1392
bio_wouldblock_error(bio);
drivers/md/raid10.c
1431
r10_bio->devs[i].bio = NULL;
drivers/md/raid10.c
1464
if (bio->bi_opf & REQ_ATOMIC)
drivers/md/raid10.c
1473
r10_bio->devs[i].bio = bio;
drivers/md/raid10.c
1477
r10_bio->devs[i].repl_bio = bio;
drivers/md/raid10.c
1485
if (r10_bio->sectors < bio_sectors(bio)) {
drivers/md/raid10.c
1487
bio = bio_submit_split_bioset(bio, r10_bio->sectors,
drivers/md/raid10.c
1490
if (!bio) {
drivers/md/raid10.c
1495
r10_bio->master_bio = bio;
drivers/md/raid10.c
1498
md_account_bio(mddev, &bio);
drivers/md/raid10.c
1499
r10_bio->master_bio = bio;
drivers/md/raid10.c
1503
if (r10_bio->devs[i].bio)
drivers/md/raid10.c
1504
raid10_write_one_disk(mddev, r10_bio, bio, false, i);
drivers/md/raid10.c
1506
raid10_write_one_disk(mddev, r10_bio, bio, true, i);
drivers/md/raid10.c
1516
if (r10_bio->devs[k].bio) {
drivers/md/raid10.c
1518
r10_bio->devs[k].bio = NULL;
drivers/md/raid10.c
1529
static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
drivers/md/raid10.c
1536
r10_bio->master_bio = bio;
drivers/md/raid10.c
1540
r10_bio->sector = bio->bi_iter.bi_sector;
drivers/md/raid10.c
1546
if (bio_data_dir(bio) == READ)
drivers/md/raid10.c
1547
raid10_read_request(mddev, bio, r10_bio, true);
drivers/md/raid10.c
1549
raid10_write_request(mddev, bio, r10_bio);
drivers/md/raid10.c
1574
static void raid10_end_discard_request(struct bio *bio)
drivers/md/raid10.c
1576
struct r10bio *r10_bio = bio->bi_private;
drivers/md/raid10.c
1588
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
drivers/md/raid10.c
1602
static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
drivers/md/raid10.c
1609
struct bio *split;
drivers/md/raid10.c
1626
if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
drivers/md/raid10.c
1627
bio_wouldblock_error(bio);
drivers/md/raid10.c
163
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
drivers/md/raid10.c
164
if (!bio)
drivers/md/raid10.c
1646
bio_start = bio->bi_iter.bi_sector;
drivers/md/raid10.c
1647
bio_end = bio_end_sector(bio);
drivers/md/raid10.c
1657
if (bio_sectors(bio) < stripe_size*2)
drivers/md/raid10.c
166
bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
drivers/md/raid10.c
1666
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
drivers/md/raid10.c
1668
bio->bi_status = errno_to_blk_status(PTR_ERR(split));
drivers/md/raid10.c
1669
bio_endio(bio);
drivers/md/raid10.c
167
r10_bio->devs[j].bio = bio;
drivers/md/raid10.c
1673
bio_chain(split, bio);
drivers/md/raid10.c
1674
trace_block_split(split, bio->bi_iter.bi_sector);
drivers/md/raid10.c
1682
split_size = bio_sectors(bio) - remainder;
drivers/md/raid10.c
1683
split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
drivers/md/raid10.c
1685
bio->bi_status = errno_to_blk_status(PTR_ERR(split));
drivers/md/raid10.c
1686
bio_endio(bio);
drivers/md/raid10.c
1690
bio_chain(split, bio);
drivers/md/raid10.c
1691
trace_block_split(split, bio->bi_iter.bi_sector);
drivers/md/raid10.c
1694
submit_bio_noacct(bio);
drivers/md/raid10.c
1695
bio = split;
drivers/md/raid10.c
1699
bio_start = bio->bi_iter.bi_sector;
drivers/md/raid10.c
170
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
drivers/md/raid10.c
1700
bio_end = bio_end_sector(bio);
drivers/md/raid10.c
171
if (!bio)
drivers/md/raid10.c
173
bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
drivers/md/raid10.c
174
r10_bio->devs[j].repl_bio = bio;
drivers/md/raid10.c
1741
md_account_bio(mddev, &bio);
drivers/md/raid10.c
1742
r10_bio->master_bio = bio;
drivers/md/raid10.c
1747
r10_bio->master_bio = (struct bio *)first_r10bio;
drivers/md/raid10.c
1759
r10_bio->devs[disk].bio = NULL;
drivers/md/raid10.c
1770
r10_bio->devs[disk].bio = bio;
drivers/md/raid10.c
1774
r10_bio->devs[disk].repl_bio = bio;
drivers/md/raid10.c
1782
struct bio *mbio, *rbio = NULL;
drivers/md/raid10.c
181
struct bio *rbio = r10_bio->devs[j].repl_bio;
drivers/md/raid10.c
1816
if (r10_bio->devs[disk].bio) {
drivers/md/raid10.c
1818
mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
drivers/md/raid10.c
1822
r10_bio->devs[disk].bio = mbio;
drivers/md/raid10.c
1832
rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
drivers/md/raid10.c
1865
static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
drivers/md/raid10.c
1870
int sectors = bio_sectors(bio);
drivers/md/raid10.c
1872
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
drivers/md/raid10.c
1873
&& md_flush_request(mddev, bio))
drivers/md/raid10.c
1876
md_write_start(mddev, bio);
drivers/md/raid10.c
1878
if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
drivers/md/raid10.c
1879
if (!raid10_handle_discard(mddev, bio))
drivers/md/raid10.c
188
bio = r10_bio->devs[j].bio;
drivers/md/raid10.c
1886
if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
drivers/md/raid10.c
1892
(bio->bi_iter.bi_sector &
drivers/md/raid10.c
1894
__make_request(mddev, bio, sectors);
drivers/md/raid10.c
200
bio->bi_private = rp;
drivers/md/raid10.c
216
if (r10_bio->devs[j].bio)
drivers/md/raid10.c
217
bio_uninit(r10_bio->devs[j].bio);
drivers/md/raid10.c
218
kfree(r10_bio->devs[j].bio);
drivers/md/raid10.c
2219
static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
drivers/md/raid10.c
2223
if (!bio->bi_status)
drivers/md/raid10.c
2245
static void end_sync_read(struct bio *bio)
drivers/md/raid10.c
2247
struct r10bio *r10_bio = get_resync_r10bio(bio);
drivers/md/raid10.c
2249
int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
drivers/md/raid10.c
2251
__end_sync_read(r10_bio, bio, d);
drivers/md/raid10.c
2254
static void end_reshape_read(struct bio *bio)
drivers/md/raid10.c
2257
struct r10bio *r10_bio = bio->bi_private;
drivers/md/raid10.c
2259
__end_sync_read(r10_bio, bio, r10_bio->read_slot);
drivers/md/raid10.c
2289
static void end_sync_write(struct bio *bio)
drivers/md/raid10.c
2291
struct r10bio *r10_bio = get_resync_r10bio(bio);
drivers/md/raid10.c
2299
d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
drivers/md/raid10.c
2305
if (bio->bi_status) {
drivers/md/raid10.c
2345
struct bio *tbio, *fbio;
drivers/md/raid10.c
2353
if (!r10_bio->devs[i].bio->bi_status)
drivers/md/raid10.c
2360
fbio = r10_bio->devs[i].bio;
drivers/md/raid10.c
237
struct bio *bio = r10bio->devs[j].bio;
drivers/md/raid10.c
2372
tbio = r10_bio->devs[i].bio;
drivers/md/raid10.c
2382
if (!r10_bio->devs[i].bio->bi_status) {
drivers/md/raid10.c
239
if (bio) {
drivers/md/raid10.c
240
rp = get_resync_pages(bio);
drivers/md/raid10.c
242
bio_uninit(bio);
drivers/md/raid10.c
243
kfree(bio);
drivers/md/raid10.c
2442
if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
drivers/md/raid10.c
2443
&& r10_bio->devs[i].bio != fbio)
drivers/md/raid10.c
246
bio = r10bio->devs[j].repl_bio;
drivers/md/raid10.c
247
if (bio) {
drivers/md/raid10.c
2477
struct bio *bio = r10_bio->devs[0].bio;
drivers/md/raid10.c
248
bio_uninit(bio);
drivers/md/raid10.c
2483
struct page **pages = get_resync_pages(bio)->pages;
drivers/md/raid10.c
249
kfree(bio);
drivers/md/raid10.c
2551
struct bio *wbio = r10_bio->devs[1].bio;
drivers/md/raid10.c
2552
struct bio *wbio2 = r10_bio->devs[1].repl_bio;
drivers/md/raid10.c
2631
r10_bio->devs[slot].bio = IO_BLOCKED;
drivers/md/raid10.c
264
struct bio **bio = & r10_bio->devs[i].bio;
drivers/md/raid10.c
265
if (!BIO_SPECIAL(*bio))
drivers/md/raid10.c
266
bio_put(*bio);
drivers/md/raid10.c
267
*bio = NULL;
drivers/md/raid10.c
268
bio = &r10_bio->devs[i].repl_bio;
drivers/md/raid10.c
2682
r10_bio->devs[slot].bio
drivers/md/raid10.c
269
if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
drivers/md/raid10.c
270
bio_put(*bio);
drivers/md/raid10.c
271
*bio = NULL;
drivers/md/raid10.c
2770
struct bio *bio = r10_bio->master_bio;
drivers/md/raid10.c
2801
struct bio *wbio;
drivers/md/raid10.c
2806
wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
drivers/md/raid10.c
2808
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
drivers/md/raid10.c
2834
struct bio *bio;
drivers/md/raid10.c
2846
bio = r10_bio->devs[slot].bio;
drivers/md/raid10.c
2847
bio_put(bio);
drivers/md/raid10.c
2848
r10_bio->devs[slot].bio = NULL;
drivers/md/raid10.c
2851
r10_bio->devs[slot].bio = IO_BLOCKED;
drivers/md/raid10.c
2885
if (r10_bio->devs[m].bio == NULL ||
drivers/md/raid10.c
2886
r10_bio->devs[m].bio->bi_end_io == NULL)
drivers/md/raid10.c
2888
if (!r10_bio->devs[m].bio->bi_status)
drivers/md/raid10.c
2917
struct bio *bio = r10_bio->devs[m].bio;
drivers/md/raid10.c
2919
if (bio == IO_MADE_GOOD) {
drivers/md/raid10.c
2925
} else if (bio != NULL && bio->bi_status) {
drivers/md/raid10.c
2930
bio = r10_bio->devs[m].repl_bio;
drivers/md/raid10.c
2932
if (rdev && bio == IO_MADE_GOOD) {
drivers/md/raid10.c
3054
struct bio *bio;
drivers/md/raid10.c
3065
bio = r10bio->devs[i].bio;
drivers/md/raid10.c
3066
rp = bio->bi_private;
drivers/md/raid10.c
3067
bio_reset(bio, NULL, 0);
drivers/md/raid10.c
3068
bio->bi_private = rp;
drivers/md/raid10.c
3069
bio = r10bio->devs[i].repl_bio;
drivers/md/raid10.c
3070
if (bio) {
drivers/md/raid10.c
3071
rp = bio->bi_private;
drivers/md/raid10.c
3072
bio_reset(bio, NULL, 0);
drivers/md/raid10.c
3073
bio->bi_private = rp;
drivers/md/raid10.c
3153
struct bio *biolist = NULL, *bio;
drivers/md/raid10.c
322
struct bio *bio = r10_bio->master_bio;
drivers/md/raid10.c
327
bio->bi_status = BLK_STS_IOERR;
drivers/md/raid10.c
328
bio_endio(bio);
drivers/md/raid10.c
3331
r10_bio->master_bio = (struct bio*)rb2;
drivers/md/raid10.c
3381
bio = r10_bio->devs[0].bio;
drivers/md/raid10.c
3382
bio->bi_next = biolist;
drivers/md/raid10.c
3383
biolist = bio;
drivers/md/raid10.c
3384
bio->bi_end_io = end_sync_read;
drivers/md/raid10.c
3385
bio->bi_opf = REQ_OP_READ;
drivers/md/raid10.c
3387
bio->bi_opf |= MD_FAILFAST;
drivers/md/raid10.c
3389
bio->bi_iter.bi_sector = from_addr +
drivers/md/raid10.c
3391
bio_set_dev(bio, rdev->bdev);
drivers/md/raid10.c
3406
bio = r10_bio->devs[1].bio;
drivers/md/raid10.c
3407
bio->bi_next = biolist;
drivers/md/raid10.c
3408
biolist = bio;
drivers/md/raid10.c
3409
bio->bi_end_io = end_sync_write;
drivers/md/raid10.c
3410
bio->bi_opf = REQ_OP_WRITE;
drivers/md/raid10.c
3411
bio->bi_iter.bi_sector = to_addr
drivers/md/raid10.c
3413
bio_set_dev(bio, mrdev->bdev);
drivers/md/raid10.c
3416
r10_bio->devs[1].bio->bi_end_io = NULL;
drivers/md/raid10.c
3419
bio = r10_bio->devs[1].repl_bio;
drivers/md/raid10.c
3420
if (bio)
drivers/md/raid10.c
3421
bio->bi_end_io = NULL;
drivers/md/raid10.c
3428
bio->bi_next = biolist;
drivers/md/raid10.c
3429
biolist = bio;
drivers/md/raid10.c
3430
bio->bi_end_io = end_sync_write;
drivers/md/raid10.c
3431
bio->bi_opf = REQ_OP_WRITE;
drivers/md/raid10.c
3432
bio->bi_iter.bi_sector = to_addr +
drivers/md/raid10.c
3434
bio_set_dev(bio, mreplace->bdev);
drivers/md/raid10.c
3477
if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
drivers/md/raid10.c
3491
r10_bio->devs[0].bio->bi_opf
drivers/md/raid10.c
355
struct bio *bio, int *slotp, int *replp)
drivers/md/raid10.c
3554
bio = r10_bio->devs[i].bio;
drivers/md/raid10.c
3555
bio->bi_status = BLK_STS_IOERR;
drivers/md/raid10.c
3574
bio->bi_next = biolist;
drivers/md/raid10.c
3575
biolist = bio;
drivers/md/raid10.c
3576
bio->bi_end_io = end_sync_read;
drivers/md/raid10.c
3577
bio->bi_opf = REQ_OP_READ;
drivers/md/raid10.c
3579
bio->bi_opf |= MD_FAILFAST;
drivers/md/raid10.c
3580
bio->bi_iter.bi_sector = sector + rdev->data_offset;
drivers/md/raid10.c
3581
bio_set_dev(bio, rdev->bdev);
drivers/md/raid10.c
3591
bio = r10_bio->devs[i].repl_bio;
drivers/md/raid10.c
3592
bio->bi_status = BLK_STS_IOERR;
drivers/md/raid10.c
3595
bio->bi_next = biolist;
drivers/md/raid10.c
3596
biolist = bio;
drivers/md/raid10.c
3597
bio->bi_end_io = end_sync_write;
drivers/md/raid10.c
3598
bio->bi_opf = REQ_OP_WRITE;
drivers/md/raid10.c
3600
bio->bi_opf |= MD_FAILFAST;
drivers/md/raid10.c
3601
bio->bi_iter.bi_sector = sector + rdev->data_offset;
drivers/md/raid10.c
3602
bio_set_dev(bio, rdev->bdev);
drivers/md/raid10.c
3609
if (r10_bio->devs[i].bio->bi_end_io)
drivers/md/raid10.c
361
if (r10_bio->devs[slot].bio == bio)
drivers/md/raid10.c
363
if (r10_bio->devs[slot].repl_bio == bio) {
drivers/md/raid10.c
3634
for (bio= biolist ; bio ; bio=bio->bi_next) {
drivers/md/raid10.c
3635
struct resync_pages *rp = get_resync_pages(bio);
drivers/md/raid10.c
3637
if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
drivers/md/raid10.c
3638
bio->bi_status = BLK_STS_RESOURCE;
drivers/md/raid10.c
3639
bio_endio(bio);
drivers/md/raid10.c
3696
bio = biolist;
drivers/md/raid10.c
3699
bio->bi_next = NULL;
drivers/md/raid10.c
3700
r10_bio = get_resync_r10bio(bio);
drivers/md/raid10.c
3703
if (bio->bi_end_io == end_sync_read) {
drivers/md/raid10.c
3704
bio->bi_status = 0;
drivers/md/raid10.c
3705
submit_bio_noacct(bio);
drivers/md/raid10.c
378
static void raid10_end_read_request(struct bio *bio)
drivers/md/raid10.c
380
int uptodate = !bio->bi_status;
drivers/md/raid10.c
381
struct r10bio *r10_bio = bio->bi_private;
drivers/md/raid10.c
404
} else if (!raid1_should_handle_error(bio)) {
drivers/md/raid10.c
454
static void raid10_end_write_request(struct bio *bio)
drivers/md/raid10.c
456
struct r10bio *r10_bio = bio->bi_private;
drivers/md/raid10.c
4606
struct bio *blist;
drivers/md/raid10.c
4607
struct bio *bio, *read_bio;
drivers/md/raid10.c
462
struct bio *to_put = NULL;
drivers/md/raid10.c
463
bool ignore_error = !raid1_should_handle_error(bio) ||
drivers/md/raid10.c
464
(bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
drivers/md/raid10.c
466
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
drivers/md/raid10.c
4767
struct bio *b;
drivers/md/raid10.c
4775
b = r10_bio->devs[s/2].bio;
drivers/md/raid10.c
478
if (bio->bi_status && !ignore_error) {
drivers/md/raid10.c
4792
pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
drivers/md/raid10.c
4798
for (bio = blist; bio ; bio = bio->bi_next) {
drivers/md/raid10.c
4799
if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
drivers/md/raid10.c
4800
bio->bi_status = BLK_STS_RESOURCE;
drivers/md/raid10.c
4801
bio_endio(bio);
drivers/md/raid10.c
4857
struct bio *b;
drivers/md/raid10.c
4865
b = r10_bio->devs[s/2].bio;
drivers/md/raid10.c
492
(bio->bi_opf & MD_FAILFAST)) {
drivers/md/raid10.c
4926
pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
drivers/md/raid10.c
4979
static void end_reshape_write(struct bio *bio)
drivers/md/raid10.c
4981
struct r10bio *r10_bio = get_resync_r10bio(bio);
drivers/md/raid10.c
4989
d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
drivers/md/raid10.c
4993
if (bio->bi_status) {
drivers/md/raid10.c
504
r10_bio->devs[slot].bio = NULL;
drivers/md/raid10.c
505
to_put = bio;
drivers/md/raid10.c
534
bio_put(bio);
drivers/md/raid10.c
538
r10_bio->devs[slot].bio = IO_MADE_GOOD;
drivers/md/raid10.c
759
if (r10_bio->devs[slot].bio == IO_BLOCKED)
drivers/md/raid10.c
77
static void end_reshape_write(struct bio *bio);
drivers/md/raid10.c
870
struct bio *bio;
drivers/md/raid10.c
872
bio = bio_list_get(&conf->pending_bio_list);
drivers/md/raid10.c
890
while (bio) { /* submit pending writes */
drivers/md/raid10.c
891
struct bio *next = bio->bi_next;
drivers/md/raid10.c
893
raid1_submit_write(bio);
drivers/md/raid10.c
894
bio = next;
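
Note: the raid10.c entries above trace the block layer's split-and-chain pattern: bio_split() carves the front off an oversized bio, bio_chain() makes the remaining tail wait on it, the tail is requeued with submit_bio_noacct(), and processing continues on the front. A minimal sketch of that pattern, assuming the usual block headers (<linux/bio.h>, <trace/events/block.h>); demo_split_front and max_sectors are illustrative names, not kernel symbols:

static struct bio *demo_split_front(struct bio *bio, unsigned int max_sectors,
                                    struct bio_set *bs)
{
        struct bio *split;

        if (bio_sectors(bio) <= max_sectors)
                return bio;

        /* 'split' keeps the first max_sectors; 'bio' becomes the tail */
        split = bio_split(bio, max_sectors, GFP_NOIO, bs);
        if (IS_ERR(split)) {
                bio->bi_status = errno_to_blk_status(PTR_ERR(split));
                bio_endio(bio);
                return NULL;
        }
        /* the tail cannot complete before the front half does */
        bio_chain(split, bio);
        trace_block_split(split, bio->bi_iter.bi_sector);
        /* requeue the tail for a later pass; keep working on the front */
        submit_bio_noacct(bio);
        return split;
}
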
drivers/md/raid10.h
125
struct bio *master_bio;
drivers/md/raid10.h
141
struct bio *bio;
drivers/md/raid10.h
143
struct bio *repl_bio; /* used for resync and
drivers/md/raid5-cache.c
1096
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
drivers/md/raid5-cache.c
1106
if (bio->bi_iter.bi_size == 0) {
drivers/md/raid5-cache.c
1107
bio_endio(bio);
drivers/md/raid5-cache.c
1110
bio->bi_opf &= ~REQ_PREFLUSH;
drivers/md/raid5-cache.c
1113
if (bio->bi_iter.bi_size == 0) {
drivers/md/raid5-cache.c
1116
bio_list_add(&log->current_io->flush_barriers, bio);
drivers/md/raid5-cache.c
114
struct bio flush_bio;
drivers/md/raid5-cache.c
1252
static void r5l_log_flush_endio(struct bio *bio)
drivers/md/raid5-cache.c
1254
struct r5l_log *log = container_of(bio, struct r5l_log,
drivers/md/raid5-cache.c
1259
if (bio->bi_status)
drivers/md/raid5-cache.c
1261
bio_uninit(bio);
drivers/md/raid5-cache.c
1668
struct bio bio;
drivers/md/raid5-cache.c
1671
bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
drivers/md/raid5-cache.c
1673
bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
drivers/md/raid5-cache.c
1679
__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
drivers/md/raid5-cache.c
1689
ret = submit_bio_wait(&bio);
drivers/md/raid5-cache.c
1690
bio_uninit(&bio);
drivers/md/raid5-cache.c
215
struct bio *current_bio;/* current_bio accepting new data */
drivers/md/raid5-cache.c
226
struct bio *split_bio;
drivers/md/raid5-cache.c
294
struct bio *wbi, *wbi2;
drivers/md/raid5-cache.c
557
static void r5l_log_endio(struct bio *bio)
drivers/md/raid5-cache.c
559
struct r5l_io_unit *io = bio->bi_private;
drivers/md/raid5-cache.c
566
if (bio->bi_status)
drivers/md/raid5-cache.c
569
bio_put(bio);
drivers/md/raid5-cache.c
606
struct bio *bi;
drivers/md/raid5-cache.c
734
static struct bio *r5l_bio_alloc(struct r5l_log *log)
drivers/md/raid5-cache.c
736
struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
drivers/md/raid5-cache.c
739
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
drivers/md/raid5-cache.c
741
return bio;
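
Note: the raid5-cache.c recovery entries above (bio_init, submit_bio_wait, bio_uninit) use a bio on the stack for synchronous I/O. A minimal sketch of that pattern; demo_sync_read_page is an illustrative name:

static int demo_sync_read_page(struct block_device *bdev, sector_t sector,
                               struct page *page)
{
        struct bio_vec bvec;
        struct bio bio;
        int ret;

        bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = sector;
        __bio_add_page(&bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(&bio);
        /* a stack bio is never bio_put(); bio_uninit() drops its refs */
        bio_uninit(&bio);
        return ret;
}
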
drivers/md/raid5-log.h
104
static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
drivers/md/raid5-log.h
109
ret = r5l_handle_flush_request(conf->log, bio);
drivers/md/raid5-log.h
11
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
drivers/md/raid5-log.h
111
ret = ppl_handle_flush_request(bio);
drivers/md/raid5-log.h
44
int ppl_handle_flush_request(struct bio *bio);
drivers/md/raid5-ppl.c
153
struct bio bio;
drivers/md/raid5-ppl.c
253
bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
drivers/md/raid5-ppl.c
398
static void ppl_log_endio(struct bio *bio)
drivers/md/raid5-ppl.c
400
struct ppl_io_unit *io = bio->bi_private;
drivers/md/raid5-ppl.c
407
if (bio->bi_status)
drivers/md/raid5-ppl.c
418
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
drivers/md/raid5-ppl.c
421
__func__, io->seq, bio->bi_iter.bi_size,
drivers/md/raid5-ppl.c
422
(unsigned long long)bio->bi_iter.bi_sector,
drivers/md/raid5-ppl.c
423
bio->bi_bdev);
drivers/md/raid5-ppl.c
425
submit_bio(bio);
drivers/md/raid5-ppl.c
433
struct bio *bio = &io->bio;
drivers/md/raid5-ppl.c
437
bio->bi_private = io;
drivers/md/raid5-ppl.c
440
ppl_log_endio(bio);
drivers/md/raid5-ppl.c
466
bio->bi_end_io = ppl_log_endio;
drivers/md/raid5-ppl.c
467
bio->bi_iter.bi_sector = log->next_io_sector;
drivers/md/raid5-ppl.c
468
__bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
drivers/md/raid5-ppl.c
492
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
drivers/md/raid5-ppl.c
493
struct bio *prev = bio;
drivers/md/raid5-ppl.c
495
bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
drivers/md/raid5-ppl.c
498
bio->bi_iter.bi_sector = bio_end_sector(prev);
drivers/md/raid5-ppl.c
499
__bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
drivers/md/raid5-ppl.c
501
bio_chain(bio, prev);
drivers/md/raid5-ppl.c
506
ppl_submit_iounit_bio(io, bio);
drivers/md/raid5-ppl.c
581
static void ppl_flush_endio(struct bio *bio)
drivers/md/raid5-ppl.c
583
struct ppl_io_unit *io = bio->bi_private;
drivers/md/raid5-ppl.c
588
pr_debug("%s: dev: %pg\n", __func__, bio->bi_bdev);
drivers/md/raid5-ppl.c
590
if (bio->bi_status) {
drivers/md/raid5-ppl.c
594
rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
drivers/md/raid5-ppl.c
600
bio_put(bio);
drivers/md/raid5-ppl.c
628
struct bio *bio;
drivers/md/raid5-ppl.c
630
bio = bio_alloc_bioset(bdev, 0,
drivers/md/raid5-ppl.c
633
bio->bi_private = io;
drivers/md/raid5-ppl.c
634
bio->bi_end_io = ppl_flush_endio;
drivers/md/raid5-ppl.c
636
pr_debug("%s: dev: %ps\n", __func__, bio->bi_bdev);
drivers/md/raid5-ppl.c
638
submit_bio(bio);
drivers/md/raid5-ppl.c
680
int ppl_handle_flush_request(struct bio *bio)
drivers/md/raid5-ppl.c
682
if (bio->bi_iter.bi_size == 0) {
drivers/md/raid5-ppl.c
683
bio_endio(bio);
drivers/md/raid5-ppl.c
686
bio->bi_opf &= ~REQ_PREFLUSH;
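
Note: ppl_handle_flush_request() and r5l_handle_flush_request() above share one flush convention: a zero-length REQ_PREFLUSH bio is completed on the spot, while a flush that carries data has the flag stripped and flows on as ordinary writes. A sketch of that shape; the -EAGAIN return meaning "caller submits the data portion" follows the raid5-log.h dispatch shown above:

static int demo_handle_flush(struct bio *bio)
{
        if (bio->bi_iter.bi_size == 0) {
                /* pure flush, no payload: complete it immediately */
                bio_endio(bio);
                return 0;
        }
        /* flush plus data: drop the flag, let the data path proceed */
        bio->bi_opf &= ~REQ_PREFLUSH;
        return -EAGAIN;
}
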
drivers/md/raid5.c
1035
struct bio *bio;
drivers/md/raid5.c
1037
while ((bio = bio_list_pop(tmp)))
drivers/md/raid5.c
1038
submit_bio_noacct(bio);
drivers/md/raid5.c
1138
raid5_end_read_request(struct bio *bi);
drivers/md/raid5.c
1140
raid5_end_write_request(struct bio *bi);
drivers/md/raid5.c
1162
struct bio *bi, *rbi;
drivers/md/raid5.c
1362
async_copy_data(int frombio, struct bio *bio, struct page **page,
drivers/md/raid5.c
1374
if (bio->bi_iter.bi_sector >= sector)
drivers/md/raid5.c
1375
page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
drivers/md/raid5.c
1377
page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
drivers/md/raid5.c
1383
bio_for_each_segment(bvl, bio, iter) {
drivers/md/raid5.c
1445
struct bio *rbi, *rbi2;
drivers/md/raid5.c
1478
struct bio *rbi;
drivers/md/raid5.c
1924
struct bio *chosen;
drivers/md/raid5.c
1928
struct bio *wbi;
drivers/md/raid5.c
2710
static void raid5_end_read_request(struct bio * bi)
drivers/md/raid5.c
2836
static void raid5_end_write_request(struct bio *bi)
drivers/md/raid5.c
3433
static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
drivers/md/raid5.c
3437
struct bio **bip;
drivers/md/raid5.c
3493
static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
drivers/md/raid5.c
3497
struct bio **bip;
drivers/md/raid5.c
3551
static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
drivers/md/raid5.c
3592
struct bio *bi;
drivers/md/raid5.c
3624
struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector);
drivers/md/raid5.c
3640
struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
drivers/md/raid5.c
3664
struct bio *nextbi =
drivers/md/raid5.c
4004
struct bio *wbi, *wbi2;
drivers/md/raid5.c
5336
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
drivers/md/raid5.c
5339
sector_t sector = bio->bi_iter.bi_sector;
drivers/md/raid5.c
5341
unsigned int bio_sectors = bio_sectors(bio);
drivers/md/raid5.c
5352
static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
drivers/md/raid5.c
5365
static struct bio *remove_bio_from_retry(struct r5conf *conf,
drivers/md/raid5.c
5368
struct bio *bi;
drivers/md/raid5.c
5392
static void raid5_align_endio(struct bio *bi)
drivers/md/raid5.c
5394
struct bio *raid_bi = bi->bi_private;
drivers/md/raid5.c
5416
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
drivers/md/raid5.c
5419
struct bio *align_bio;
drivers/md/raid5.c
5492
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
drivers/md/raid5.c
5689
static void make_discard_request(struct mddev *mddev, struct bio *bi)
drivers/md/raid5.c
5815
struct bio *bi, int forwrite, int previous)
drivers/md/raid5.c
5951
sector_t logical_sector, struct bio *bi)
drivers/md/raid5.c
6057
struct bio *bi)
drivers/md/raid5.c
6084
static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
drivers/md/raid5.c
6600
static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
drivers/md/raid5.c
6779
struct bio *bio;
drivers/md/raid5.c
6804
while ((bio = remove_bio_from_retry(conf, &offset))) {
drivers/md/raid5.c
6807
ok = retry_aligned_read(conf, bio, offset);
drivers/md/raid5.c
7667
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
drivers/md/raid5.h
262
struct bio req, rreq;
drivers/md/raid5.h
266
struct bio *toread, *read, *towrite, *written;
drivers/md/raid5.h
616
struct bio *retry_read_aligned; /* currently retrying aligned bios */
drivers/md/raid5.h
618
struct bio *retry_read_aligned_list; /* aligned bios retry list */
drivers/md/raid5.h
716
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
drivers/md/raid5.h
718
if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
drivers/md/raid5.h
719
return bio->bi_next;
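
Note: r5_next_bio() above walks bios that are singly linked through bi_next for as long as they overlap one stripe unit. A sketch of the walk as raid5.c performs it; 'conf', 'dev', dev->towrite and dev->sector are assumed from the surrounding stripe_head code:

static void demo_walk_stripe_writes(struct r5conf *conf, struct r5dev *dev)
{
        struct bio *wbi = dev->towrite;

        while (wbi && wbi->bi_iter.bi_sector <
                      dev->sector + RAID5_STRIPE_SECTORS(conf)) {
                /* handle one queued write covering this stripe unit */
                wbi = r5_next_bio(conf, wbi, dev->sector);
        }
}
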
drivers/mtd/mtd_blkdevs.c
67
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
drivers/mtd/mtd_blkdevs.c
70
kunmap(bio_page(req->bio));
drivers/mtd/mtd_blkdevs.c
74
kunmap(bio_page(req->bio));
drivers/mtd/mtd_blkdevs.c
86
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
drivers/mtd/mtd_blkdevs.c
89
kunmap(bio_page(req->bio));
drivers/mtd/mtd_blkdevs.c
93
kunmap(bio_page(req->bio));
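
Note: mtd_blkdevs above addresses a request's payload directly through its first bio segment with kmap()/bio_page()/bio_offset(). A sketch, assuming the payload sits entirely in that first segment as the driver's request setup arranges; needs <linux/blk-mq.h> and <linux/highmem.h>:

static void *demo_map_first_segment(struct request *req)
{
        /* highmem-safe mapping of the first segment's page */
        return kmap(bio_page(req->bio)) + bio_offset(req->bio);
}

static void demo_unmap_first_segment(struct request *req)
{
        kunmap(bio_page(req->bio));
}
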
drivers/nvdimm/btt.c
1434
static void btt_submit_bio(struct bio *bio)
drivers/nvdimm/btt.c
1436
struct bio_integrity_payload *bip = bio_integrity(bio);
drivers/nvdimm/btt.c
1437
struct btt *btt = bio->bi_bdev->bd_disk->private_data;
drivers/nvdimm/btt.c
1444
if (!bio_integrity_prep(bio))
drivers/nvdimm/btt.c
1447
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
drivers/nvdimm/btt.c
1449
start = bio_start_io_acct(bio);
drivers/nvdimm/btt.c
1450
bio_for_each_segment(bvec, bio, iter) {
drivers/nvdimm/btt.c
1457
bio->bi_status = BLK_STS_IOERR;
drivers/nvdimm/btt.c
1462
bio_op(bio), iter.bi_sector);
drivers/nvdimm/btt.c
1466
(op_is_write(bio_op(bio))) ? "WRITE" :
drivers/nvdimm/btt.c
1469
bio->bi_status = errno_to_blk_status(err);
drivers/nvdimm/btt.c
1474
bio_end_io_acct(bio, start);
drivers/nvdimm/btt.c
1476
bio_endio(bio);
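
Note: btt_submit_bio() above (and pmem_submit_bio() below) follow the standard ->submit_bio() shape: optional I/O accounting around a bio_for_each_segment() loop, errors recorded in bi_status, bio_endio() at the end. A skeleton of that shape; demo_rw_segment() is a hypothetical per-segment transfer helper, not a kernel function:

/* hypothetical per-segment transfer, standing in for the driver's own */
static int demo_rw_segment(struct bio_vec *bv, sector_t sector, bool is_write);

static void demo_submit_bio(struct bio *bio)
{
        bool do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
        unsigned long start = 0;
        struct bvec_iter iter;
        struct bio_vec bvec;

        if (do_acct)
                start = bio_start_io_acct(bio);
        bio_for_each_segment(bvec, bio, iter) {
                int err = demo_rw_segment(&bvec, iter.bi_sector,
                                          op_is_write(bio_op(bio)));
                if (err) {
                        bio->bi_status = errno_to_blk_status(err);
                        break;
                }
        }
        if (do_acct)
                bio_end_io_acct(bio, start);
        bio_endio(bio);
}
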
drivers/nvdimm/nd.h
424
int (*flush)(struct nd_region *nd_region, struct bio *bio);
drivers/nvdimm/nd_virtio.c
111
int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
drivers/nvdimm/nd_virtio.c
117
if (bio && bio->bi_iter.bi_sector != -1) {
drivers/nvdimm/nd_virtio.c
118
struct bio *child = bio_alloc(bio->bi_bdev, 0,
drivers/nvdimm/nd_virtio.c
124
bio_clone_blkg_association(child, bio);
drivers/nvdimm/nd_virtio.c
126
bio_chain(child, bio);
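
Note: async_pmem_flush() above hands a flush-with-data off to a zero-length child bio: the child carries REQ_PREFLUSH, bio_chain() keeps the parent from completing until the child does, and a child re-entering with bi_sector == -1 triggers the actual device flush. A sketch; demo_device_flush() is a hypothetical stand-in for the backend flush:

static int demo_device_flush(struct nd_region *nd_region);

static int demo_async_flush(struct nd_region *nd_region, struct bio *bio)
{
        if (bio && bio->bi_iter.bi_sector != -1) {
                struct bio *child = bio_alloc(bio->bi_bdev, 0,
                                              REQ_OP_WRITE | REQ_PREFLUSH,
                                              GFP_ATOMIC);

                if (!child)
                        return -ENOMEM;
                bio_clone_blkg_association(child, bio);
                /* -1 marks the child so it is not split again on re-entry */
                child->bi_iter.bi_sector = -1;
                bio_chain(child, bio);
                submit_bio(child);
                return 0;
        }
        return demo_device_flush(nd_region);
}
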
drivers/nvdimm/pmem.c
200
static void pmem_submit_bio(struct bio *bio)
drivers/nvdimm/pmem.c
208
struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
drivers/nvdimm/pmem.c
211
if (bio->bi_opf & REQ_PREFLUSH)
drivers/nvdimm/pmem.c
212
ret = nvdimm_flush(nd_region, bio);
drivers/nvdimm/pmem.c
214
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
drivers/nvdimm/pmem.c
216
start = bio_start_io_acct(bio);
drivers/nvdimm/pmem.c
217
bio_for_each_segment(bvec, bio, iter) {
drivers/nvdimm/pmem.c
218
if (op_is_write(bio_op(bio)))
drivers/nvdimm/pmem.c
225
bio->bi_status = rc;
drivers/nvdimm/pmem.c
230
bio_end_io_acct(bio, start);
drivers/nvdimm/pmem.c
232
if (bio->bi_opf & REQ_FUA)
drivers/nvdimm/pmem.c
233
ret = nvdimm_flush(nd_region, bio);
drivers/nvdimm/pmem.c
236
bio->bi_status = errno_to_blk_status(ret);
drivers/nvdimm/pmem.c
238
bio_endio(bio);
drivers/nvdimm/region_devs.c
1111
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
drivers/nvdimm/region_devs.c
1118
if (nd_region->flush(nd_region, bio))
drivers/nvdimm/virtio_pmem.h
58
int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
drivers/nvme/host/core.c
1006
u16 write_stream = req->bio->bi_write_stream;
drivers/nvme/host/core.c
1048
if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
drivers/nvme/host/core.c
1050
if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
drivers/nvme/host/core.c
1056
if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
drivers/nvme/host/core.c
829
struct bio *bio;
drivers/nvme/host/core.c
860
__rq_for_each_bio(bio, req) {
drivers/nvme/host/core.c
862
bio->bi_iter.bi_sector);
drivers/nvme/host/core.c
863
u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
drivers/nvme/host/core.c
896
cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
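
Note: the nvme DSM setup above visits every bio merged into one request with __rq_for_each_bio(), which simply follows the bi_next chain, and derives one range from each bio's bi_sector/bi_size. A sketch; demo_add_range() is a hypothetical recorder, and <linux/blk-mq.h> supplies struct request and the macro:

static void demo_add_range(sector_t sector, unsigned int bytes, int idx);

static int demo_ranges_from_request(struct request *req)
{
        struct bio *bio;
        int n = 0;

        __rq_for_each_bio(bio, req)
                demo_add_range(bio->bi_iter.bi_sector,
                               bio->bi_iter.bi_size, n++);
        return n;
}
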
drivers/nvme/host/fc.c
1889
if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
drivers/nvme/host/fc.c
1891
return blkcg_get_fc_appid(rq->bio);
drivers/nvme/host/ioctl.c
125
struct bio *bio = NULL;
drivers/nvme/host/ioctl.c
157
if (bio)
drivers/nvme/host/ioctl.c
158
blk_rq_unmap_user(bio);
drivers/nvme/host/ioctl.c
170
struct bio *bio;
drivers/nvme/host/ioctl.c
186
bio = req->bio;
drivers/nvme/host/ioctl.c
193
if (bio)
drivers/nvme/host/ioctl.c
194
blk_rq_unmap_user(bio);
drivers/nvme/host/ioctl.c
390
struct bio *bio;
drivers/nvme/host/ioctl.c
406
if (pdu->bio)
drivers/nvme/host/ioctl.c
407
blk_rq_unmap_user(pdu->bio);
drivers/nvme/host/ioctl.c
437
if (pdu->bio)
drivers/nvme/host/ioctl.c
438
blk_rq_unmap_user(pdu->bio);
drivers/nvme/host/ioctl.c
529
pdu->bio = req->bio;
drivers/nvme/host/multipath.c
142
struct bio *bio;
drivers/nvme/host/multipath.c
157
for (bio = req->bio; bio; bio = bio->bi_next) {
drivers/nvme/host/multipath.c
158
bio_set_dev(bio, ns->head->disk->part0);
drivers/nvme/host/multipath.c
159
if (bio->bi_opf & REQ_POLLED) {
drivers/nvme/host/multipath.c
160
bio->bi_opf &= ~REQ_POLLED;
drivers/nvme/host/multipath.c
161
bio->bi_cookie = BLK_QC_T_NONE;
drivers/nvme/host/multipath.c
170
bio->bi_opf &= ~REQ_NOWAIT;
drivers/nvme/host/multipath.c
511
static void nvme_ns_head_submit_bio(struct bio *bio)
drivers/nvme/host/multipath.c
513
struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
drivers/nvme/host/multipath.c
523
bio = bio_split_to_limits(bio);
drivers/nvme/host/multipath.c
524
if (!bio)
drivers/nvme/host/multipath.c
530
bio_set_dev(bio, ns->disk->part0);
drivers/nvme/host/multipath.c
531
bio->bi_opf |= REQ_NVME_MPATH;
drivers/nvme/host/multipath.c
532
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
drivers/nvme/host/multipath.c
533
bio->bi_iter.bi_sector);
drivers/nvme/host/multipath.c
534
submit_bio_noacct(bio);
drivers/nvme/host/multipath.c
539
bio_list_add(&head->requeue_list, bio);
drivers/nvme/host/multipath.c
544
bio_io_error(bio);
drivers/nvme/host/multipath.c
669
struct bio *bio, *next;
drivers/nvme/host/multipath.c
675
while ((bio = next) != NULL) {
drivers/nvme/host/multipath.c
676
next = bio->bi_next;
drivers/nvme/host/multipath.c
677
bio->bi_next = NULL;
drivers/nvme/host/multipath.c
679
submit_bio_noacct(bio);
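
Note: the multipath entries above drain the requeue list by detaching the whole chain and clearing bi_next on each bio before resubmitting, so the block layer sees independent bios again. A sketch, with the requeue_list/requeue_lock field names following struct nvme_ns_head as used in the listed driver:

static void demo_drain_requeue_list(struct nvme_ns_head *head)
{
        struct bio *bio, *next;

        spin_lock_irq(&head->requeue_lock);
        next = bio_list_get(&head->requeue_list);
        spin_unlock_irq(&head->requeue_lock);

        while ((bio = next) != NULL) {
                next = bio->bi_next;
                /* detach so each bio is submitted as an independent unit */
                bio->bi_next = NULL;
                submit_bio_noacct(bio);
        }
}
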
drivers/nvme/host/nvme.h
1048
if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
drivers/nvme/host/nvme.h
1049
trace_block_bio_complete(ns->head->disk->queue, req->bio);
drivers/nvme/host/rdma.c
1414
struct bio *bio = rq->bio;
drivers/nvme/host/rdma.c
1416
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
drivers/nvme/host/tcp.c
119
struct bio *curr_bio;
drivers/nvme/host/tcp.c
2721
req->curr_bio = rq->bio;
drivers/nvme/host/tcp.c
354
struct bio *bio = req->curr_bio;
drivers/nvme/host/tcp.c
358
vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
drivers/nvme/host/tcp.c
360
bio_for_each_bvec(bv, bio, bi) {
drivers/nvme/host/tcp.c
363
size = bio->bi_iter.bi_size;
drivers/nvme/host/tcp.c
364
offset = bio->bi_iter.bi_bvec_done;
drivers/nvme/target/io-cmd-bdev.c
180
static void nvmet_bio_done(struct bio *bio)
drivers/nvme/target/io-cmd-bdev.c
182
struct nvmet_req *req = bio->bi_private;
drivers/nvme/target/io-cmd-bdev.c
183
blk_status_t blk_status = bio->bi_status;
drivers/nvme/target/io-cmd-bdev.c
185
nvmet_req_bio_put(req, bio);
drivers/nvme/target/io-cmd-bdev.c
190
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
drivers/nvme/target/io-cmd-bdev.c
204
bip = bio_integrity_alloc(bio, GFP_NOIO,
drivers/nvme/target/io-cmd-bdev.c
212
bip_set_seed(bip, bio->bi_iter.bi_sector >>
drivers/nvme/target/io-cmd-bdev.c
215
resid = bio_integrity_bytes(bi, bio_sectors(bio));
drivers/nvme/target/io-cmd-bdev.c
218
rc = bio_integrity_add_page(bio, miter->page, len,
drivers/nvme/target/io-cmd-bdev.c
235
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
drivers/nvme/target/io-cmd-bdev.c
245
struct bio *bio;
drivers/nvme/target/io-cmd-bdev.c
282
bio = &req->b.inline_bio;
drivers/nvme/target/io-cmd-bdev.c
283
bio_init(bio, req->ns->bdev, req->inline_bvec,
drivers/nvme/target/io-cmd-bdev.c
286
bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
drivers/nvme/target/io-cmd-bdev.c
289
bio->bi_iter.bi_sector = sector;
drivers/nvme/target/io-cmd-bdev.c
290
bio->bi_private = req;
drivers/nvme/target/io-cmd-bdev.c
291
bio->bi_end_io = nvmet_bio_done;
drivers/nvme/target/io-cmd-bdev.c
299
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
drivers/nvme/target/io-cmd-bdev.c
301
struct bio *prev = bio;
drivers/nvme/target/io-cmd-bdev.c
304
rc = nvmet_bdev_alloc_bip(req, bio,
drivers/nvme/target/io-cmd-bdev.c
307
bio_io_error(bio);
drivers/nvme/target/io-cmd-bdev.c
312
bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
drivers/nvme/target/io-cmd-bdev.c
314
bio->bi_iter.bi_sector = sector;
drivers/nvme/target/io-cmd-bdev.c
316
bio_chain(bio, prev);
drivers/nvme/target/io-cmd-bdev.c
325
rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
drivers/nvme/target/io-cmd-bdev.c
327
bio_io_error(bio);
drivers/nvme/target/io-cmd-bdev.c
332
submit_bio(bio);
drivers/nvme/target/io-cmd-bdev.c
338
struct bio *bio = &req->b.inline_bio;
drivers/nvme/target/io-cmd-bdev.c
348
bio_init(bio, req->ns->bdev, req->inline_bvec,
drivers/nvme/target/io-cmd-bdev.c
350
bio->bi_private = req;
drivers/nvme/target/io-cmd-bdev.c
351
bio->bi_end_io = nvmet_bio_done;
drivers/nvme/target/io-cmd-bdev.c
353
submit_bio(bio);
drivers/nvme/target/io-cmd-bdev.c
370
struct bio *bio = NULL;
drivers/nvme/target/io-cmd-bdev.c
384
GFP_KERNEL, &bio);
drivers/nvme/target/io-cmd-bdev.c
387
if (bio) {
drivers/nvme/target/io-cmd-bdev.c
388
bio->bi_private = req;
drivers/nvme/target/io-cmd-bdev.c
389
bio->bi_end_io = nvmet_bio_done;
drivers/nvme/target/io-cmd-bdev.c
391
bio_io_error(bio);
drivers/nvme/target/io-cmd-bdev.c
393
submit_bio(bio);
drivers/nvme/target/io-cmd-bdev.c
420
struct bio *bio = NULL;
drivers/nvme/target/io-cmd-bdev.c
433
GFP_KERNEL, &bio, 0);
drivers/nvme/target/io-cmd-bdev.c
434
if (bio) {
drivers/nvme/target/io-cmd-bdev.c
435
bio->bi_private = req;
drivers/nvme/target/io-cmd-bdev.c
436
bio->bi_end_io = nvmet_bio_done;
drivers/nvme/target/io-cmd-bdev.c
437
submit_bio(bio);
drivers/nvme/target/nvmet.h
457
struct bio inline_bio;
drivers/nvme/target/nvmet.h
466
struct bio inline_bio;
drivers/nvme/target/nvmet.h
473
struct bio inline_bio;
drivers/nvme/target/nvmet.h
867
static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
drivers/nvme/target/nvmet.h
869
if (bio != &req->b.inline_bio)
drivers/nvme/target/nvmet.h
870
bio_put(bio);
drivers/nvme/target/nvmet.h
872
bio_uninit(bio);
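
Note: the nvmet entries above pair an inline-bio fast path with a matching teardown: small transfers reuse the bio embedded in the request (bio_init() on the inline bvecs), larger ones allocate, and nvmet_req_bio_put() picks bio_uninit() or bio_put() accordingly. The allocation side, sketched with the field names from the listed code; nvmet_use_inline_bvec() is assumed to be the target's own size check:

static struct bio *demo_req_bio(struct nvmet_req *req, blk_opf_t opf,
                                int sg_cnt)
{
        struct bio *bio;

        if (nvmet_use_inline_bvec(req)) {
                /* small transfer: no allocation, reuse the embedded bio */
                bio = &req->b.inline_bio;
                bio_init(bio, req->ns->bdev, req->inline_bvec,
                         ARRAY_SIZE(req->inline_bvec), opf);
        } else {
                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
                                GFP_KERNEL);
        }
        return bio;
}
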
drivers/nvme/target/passthru.c
264
struct bio *bio;
drivers/nvme/target/passthru.c
272
bio = &req->p.inline_bio;
drivers/nvme/target/passthru.c
273
bio_init(bio, NULL, req->inline_bvec,
drivers/nvme/target/passthru.c
276
bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
drivers/nvme/target/passthru.c
278
bio->bi_end_io = bio_put;
drivers/nvme/target/passthru.c
282
if (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) <
drivers/nvme/target/passthru.c
287
ret = blk_rq_append_bio(rq, bio);
drivers/nvme/target/passthru.c
293
nvmet_req_bio_put(req, bio);
drivers/nvme/target/zns.c
400
struct bio *bio = NULL;
drivers/nvme/target/zns.c
427
bio = blk_next_bio(bio, bdev, 0,
drivers/nvme/target/zns.c
430
bio->bi_iter.bi_sector = sector;
drivers/nvme/target/zns.c
437
if (bio) {
drivers/nvme/target/zns.c
438
ret = submit_bio_wait(bio);
drivers/nvme/target/zns.c
439
bio_put(bio);
drivers/nvme/target/zns.c
520
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
drivers/nvme/target/zns.c
522
struct nvmet_req *req = bio->bi_private;
drivers/nvme/target/zns.c
524
if (bio->bi_status == BLK_STS_OK) {
drivers/nvme/target/zns.c
526
nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
drivers/nvme/target/zns.c
529
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
drivers/nvme/target/zns.c
530
nvmet_req_bio_put(req, bio);
drivers/nvme/target/zns.c
541
struct bio *bio;
drivers/nvme/target/zns.c
573
bio = &req->z.inline_bio;
drivers/nvme/target/zns.c
574
bio_init(bio, req->ns->bdev, req->inline_bvec,
drivers/nvme/target/zns.c
577
bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
drivers/nvme/target/zns.c
580
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
drivers/nvme/target/zns.c
581
bio->bi_iter.bi_sector = sect;
drivers/nvme/target/zns.c
582
bio->bi_private = req;
drivers/nvme/target/zns.c
584
bio->bi_opf |= REQ_FUA;
drivers/nvme/target/zns.c
589
if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) {
drivers/nvme/target/zns.c
601
submit_bio(bio);
drivers/nvme/target/zns.c
605
nvmet_req_bio_put(req, bio);
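
Note: the zns.c reset-all path above builds one bio per zone with blk_next_bio(), which chains the previous bio to the new one and submits it, so waiting on the last bio waits on the whole chain. A sketch; start_sector, nr_zones and zone_sectors are assumed inputs, and the opcode is illustrative:

static int demo_reset_zones(struct block_device *bdev, sector_t start_sector,
                            unsigned int nr_zones, sector_t zone_sectors)
{
        struct bio *bio = NULL;
        sector_t sector = start_sector;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < nr_zones; i++) {
                /* chains the previous bio to the new one and submits it */
                bio = blk_next_bio(bio, bdev, 0,
                                   REQ_OP_ZONE_RESET | REQ_SYNC, GFP_KERNEL);
                bio->bi_iter.bi_sector = sector;
                sector += zone_sectors;
        }
        if (bio) {
                /* completes only after every chained bio has completed */
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        return ret;
}
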
drivers/s390/block/dasd_diag.c
186
private->iob.bio_list = dreq->bio;
drivers/s390/block/dasd_diag.c
322
struct dasd_diag_bio *bio;
drivers/s390/block/dasd_diag.c
398
bio = kzalloc_obj(*bio);
drivers/s390/block/dasd_diag.c
399
if (bio == NULL) {
drivers/s390/block/dasd_diag.c
410
memset(bio, 0, sizeof(*bio));
drivers/s390/block/dasd_diag.c
411
bio->type = MDSK_READ_REQ;
drivers/s390/block/dasd_diag.c
412
bio->block_number = private->pt_block + 1;
drivers/s390/block/dasd_diag.c
413
bio->buffer = label;
drivers/s390/block/dasd_diag.c
420
private->iob.bio_list = bio;
drivers/s390/block/dasd_diag.c
468
kfree(bio);
drivers/s390/block/dasd_diag.c
546
cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
drivers/s390/block/dasd_diag.c
553
dbio = dreq->bio;
drivers/s390/block/dasd_diag.c
60
struct dasd_diag_bio bio[];
drivers/s390/block/dcssblk.c
31
static void dcssblk_submit_bio(struct bio *bio);
drivers/s390/block/dcssblk.c
880
dcssblk_submit_bio(struct bio *bio)
drivers/s390/block/dcssblk.c
891
dev_info = bio->bi_bdev->bd_disk->private_data;
drivers/s390/block/dcssblk.c
894
if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) ||
drivers/s390/block/dcssblk.c
895
!IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
drivers/s390/block/dcssblk.c
905
if (bio_data_dir(bio) == WRITE) {
drivers/s390/block/dcssblk.c
913
index = (bio->bi_iter.bi_sector >> 3);
drivers/s390/block/dcssblk.c
914
bio_for_each_segment(bvec, bio, iter) {
drivers/s390/block/dcssblk.c
921
if (bio_data_dir(bio) == READ)
drivers/s390/block/dcssblk.c
927
bio_endio(bio);
drivers/s390/block/dcssblk.c
930
bio_io_error(bio);
drivers/scsi/lpfc/lpfc_scsi.c
5216
struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
drivers/scsi/lpfc/lpfc_scsi.c
5218
if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
drivers/scsi/lpfc/lpfc_scsi.c
5220
return blkcg_get_fc_appid(bio);
drivers/scsi/scsi_bsg.c
17
struct bio *bio;
drivers/scsi/scsi_bsg.c
60
bio = rq->bio;
drivers/scsi/scsi_bsg.c
92
blk_rq_unmap_user(bio);
drivers/scsi/scsi_ioctl.c
389
struct bio *bio)
drivers/scsi/scsi_ioctl.c
419
r = blk_rq_unmap_user(bio);
drivers/scsi/scsi_ioctl.c
435
struct bio *bio;
drivers/scsi/scsi_ioctl.c
478
bio = rq->bio;
drivers/scsi/scsi_ioctl.c
487
ret = scsi_complete_sghdr_rq(rq, hdr, bio);
drivers/scsi/scsi_lib.c
1301
if (req->bio) {
drivers/scsi/scsi_lib.c
745
struct bio *bio;
drivers/scsi/scsi_lib.c
757
for (bio = rq->bio; bio; bio = bio->bi_next) {
drivers/scsi/scsi_lib.c
758
if ((bio->bi_opf & ff) != ff)
drivers/scsi/scsi_lib.c
760
bytes += bio->bi_iter.bi_size;
drivers/scsi/sd.c
1230
return min3((u32)rq->bio->bi_write_hint,
drivers/scsi/sd.c
1429
sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) {
drivers/scsi/sd.c
896
struct bio *bio = rq->bio;
drivers/scsi/sd.c
901
if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
drivers/scsi/sd.c
904
if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
drivers/scsi/sd.c
911
if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
drivers/scsi/sd.c
918
if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
drivers/scsi/sg.c
136
struct bio *bio;
drivers/scsi/sg.c
1816
srp->bio = rq->bio;
drivers/scsi/sg.c
1837
if (srp->bio)
drivers/scsi/sg.c
1838
ret = blk_rq_unmap_user(srp->bio);
drivers/scsi/sg.c
811
if (srp->bio) {
drivers/scsi/sr.c
324
if (rq->bio != NULL)
drivers/scsi/sr.c
325
block_sectors = bio_sectors(rq->bio);
drivers/scsi/sr.c
942
struct bio *bio;
drivers/scsi/sr.c
966
bio = rq->bio;
drivers/scsi/sr.c
978
if (blk_rq_unmap_user(bio))
drivers/scsi/st.c
534
struct bio *tmp;
drivers/scsi/st.c
541
tmp = SRpnt->bio;
drivers/scsi/st.c
592
SRpnt->bio = req->bio;
drivers/scsi/st.h
33
struct bio *bio;
drivers/target/target_core_iblock.c
334
static void iblock_bio_done(struct bio *bio)
drivers/target/target_core_iblock.c
336
struct se_cmd *cmd = bio->bi_private;
drivers/target/target_core_iblock.c
338
blk_status_t blk_status = bio->bi_status;
drivers/target/target_core_iblock.c
340
if (bio->bi_status) {
drivers/target/target_core_iblock.c
341
pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
drivers/target/target_core_iblock.c
349
bio_put(bio);
drivers/target/target_core_iblock.c
354
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
drivers/target/target_core_iblock.c
358
struct bio *bio;
drivers/target/target_core_iblock.c
364
bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
drivers/target/target_core_iblock.c
366
if (!bio) {
drivers/target/target_core_iblock.c
371
bio->bi_private = cmd;
drivers/target/target_core_iblock.c
372
bio->bi_end_io = &iblock_bio_done;
drivers/target/target_core_iblock.c
373
bio->bi_iter.bi_sector = lba;
drivers/target/target_core_iblock.c
375
return bio;
drivers/target/target_core_iblock.c
381
struct bio *bio;
drivers/target/target_core_iblock.c
387
while ((bio = bio_list_pop(list)))
drivers/target/target_core_iblock.c
388
submit_bio(bio);
drivers/target/target_core_iblock.c
392
static void iblock_end_io_flush(struct bio *bio)
drivers/target/target_core_iblock.c
394
struct se_cmd *cmd = bio->bi_private;
drivers/target/target_core_iblock.c
396
if (bio->bi_status)
drivers/target/target_core_iblock.c
397
pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
drivers/target/target_core_iblock.c
400
if (bio->bi_status)
drivers/target/target_core_iblock.c
406
bio_put(bio);
drivers/target/target_core_iblock.c
418
struct bio *bio;
drivers/target/target_core_iblock.c
427
bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
drivers/target/target_core_iblock.c
429
bio->bi_end_io = iblock_end_io_flush;
drivers/target/target_core_iblock.c
431
bio->bi_private = cmd;
drivers/target/target_core_iblock.c
432
submit_bio(bio);
drivers/target/target_core_iblock.c
494
struct bio *bio;
drivers/target/target_core_iblock.c
530
bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
drivers/target/target_core_iblock.c
531
if (!bio)
drivers/target/target_core_iblock.c
535
bio_list_add(&list, bio);
drivers/target/target_core_iblock.c
540
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
drivers/target/target_core_iblock.c
543
bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
drivers/target/target_core_iblock.c
544
if (!bio)
drivers/target/target_core_iblock.c
548
bio_list_add(&list, bio);
drivers/target/target_core_iblock.c
560
while ((bio = bio_list_pop(&list)))
drivers/target/target_core_iblock.c
561
bio_put(bio);
drivers/target/target_core_iblock.c
688
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
drivers/target/target_core_iblock.c
704
bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
drivers/target/target_core_iblock.c
711
bip_set_seed(bip, bio->bi_iter.bi_sector >>
drivers/target/target_core_iblock.c
717
resid = bio_integrity_bytes(bi, bio_sectors(bio));
drivers/target/target_core_iblock.c
721
rc = bio_integrity_add_page(bio, miter->page, len,
drivers/target/target_core_iblock.c
748
struct bio *bio;
drivers/target/target_core_iblock.c
796
bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
drivers/target/target_core_iblock.c
797
if (!bio)
drivers/target/target_core_iblock.c
801
bio_list_add(&list, bio);
drivers/target/target_core_iblock.c
816
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
drivers/target/target_core_iblock.c
819
rc = iblock_alloc_bip(cmd, bio, &prot_miter);
drivers/target/target_core_iblock.c
829
bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
drivers/target/target_core_iblock.c
830
if (!bio)
drivers/target/target_core_iblock.c
834
bio_list_add(&list, bio);
drivers/target/target_core_iblock.c
844
rc = iblock_alloc_bip(cmd, bio, &prot_miter);
drivers/target/target_core_iblock.c
854
while ((bio = bio_list_pop(&list)))
drivers/target/target_core_iblock.c
855
bio_put(bio);
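
Note: the iblock entries above batch bios on a bio_list: pages are added while bio_add_page() accepts them, a fresh bio is opened when one fills up, the list is submitted in one burst, and on allocation failure every queued bio is unwound with bio_put(). A sketch built around the driver's iblock_get_bio() allocator shown above; demo_submit_sgl and its error handling are illustrative:

static int demo_submit_sgl(struct se_cmd *cmd, struct scatterlist *sgl,
                           int sgl_nents, sector_t block_lba, blk_opf_t opf)
{
        struct scatterlist *sg;
        struct bio_list list;
        struct bio *bio;
        int i;

        bio_list_init(&list);
        bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
        if (!bio)
                goto fail;
        bio_list_add(&list, bio);

        for_each_sg(sgl, sg, sgl_nents, i) {
                /* page no longer fits: open a new bio at the current LBA */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        bio = iblock_get_bio(cmd, block_lba, sgl_nents - i,
                                             opf);
                        if (!bio)
                                goto fail;
                        bio_list_add(&list, bio);
                }
                block_lba += sg->length >> SECTOR_SHIFT;
        }

        while ((bio = bio_list_pop(&list)))
                submit_bio(bio);
        return 0;

fail:
        /* unwind: drop every bio still queued on the list */
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
        return -ENOMEM;
}
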
drivers/target/target_core_pscsi.c
817
static void pscsi_bi_endio(struct bio *bio)
drivers/target/target_core_pscsi.c
819
bio_uninit(bio);
drivers/target/target_core_pscsi.c
820
kfree(bio);
drivers/target/target_core_pscsi.c
827
struct bio *bio = NULL;
drivers/target/target_core_pscsi.c
859
if (!bio) {
drivers/target/target_core_pscsi.c
862
bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
drivers/target/target_core_pscsi.c
863
if (!bio)
drivers/target/target_core_pscsi.c
865
bio_init_inline(bio, NULL, nr_vecs,
drivers/target/target_core_pscsi.c
867
bio->bi_end_io = pscsi_bi_endio;
drivers/target/target_core_pscsi.c
870
" dir: %s nr_vecs: %d\n", bio,
drivers/target/target_core_pscsi.c
875
" bio: %p page: %p len: %d off: %d\n", i, bio,
drivers/target/target_core_pscsi.c
878
rc = bio_add_page(bio, page, bytes, off);
drivers/target/target_core_pscsi.c
880
bio_segments(bio), nr_vecs);
drivers/target/target_core_pscsi.c
884
" bio\n", bio->bi_vcnt, i, bio);
drivers/target/target_core_pscsi.c
886
rc = blk_rq_append_bio(req, bio);
drivers/target/target_core_pscsi.c
899
if (bio) {
drivers/target/target_core_pscsi.c
900
rc = blk_rq_append_bio(req, bio);
drivers/target/target_core_pscsi.c
909
if (bio) {
drivers/target/target_core_pscsi.c
910
bio_uninit(bio);
drivers/target/target_core_pscsi.c
911
kfree(bio);
drivers/target/target_core_pscsi.c
913
while (req->bio) {
drivers/target/target_core_pscsi.c
914
bio = req->bio;
drivers/target/target_core_pscsi.c
915
req->bio = bio->bi_next;
drivers/target/target_core_pscsi.c
916
bio_uninit(bio);
drivers/target/target_core_pscsi.c
917
kfree(bio);
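
Note: pscsi above (like the raid10 resync path earlier in this listing) allocates bios with bio_kmalloc(), which must be torn down with bio_uninit() plus kfree() rather than bio_put(). A sketch of that pairing; the demo_* names are illustrative:

static void demo_bi_endio(struct bio *bio)
{
        /* a bio_kmalloc() bio is freed by hand, never bio_put() */
        bio_uninit(bio);
        kfree(bio);
}

static struct bio *demo_alloc_bio(unsigned short nr_vecs)
{
        struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

        if (!bio)
                return NULL;
        bio_init_inline(bio, NULL, nr_vecs, REQ_OP_READ);
        bio->bi_end_io = demo_bi_endio;
        return bio;
}
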
fs/btrfs/bio.c
1008
u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/bio.c
1009
u64 length = bbio->bio.bi_iter.bi_size;
fs/btrfs/bio.c
1014
ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
fs/btrfs/bio.c
1027
btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
fs/btrfs/bio.c
1038
offsetof(struct btrfs_bio, bio),
fs/btrfs/bio.c
1042
offsetof(struct btrfs_bio, bio), 0))
fs/btrfs/bio.c
1045
offsetof(struct btrfs_bio, bio),
fs/btrfs/bio.c
119
bbio->bio.bi_status = status;
fs/btrfs/bio.c
120
if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
fs/btrfs/bio.c
126
bio_put(&bbio->bio);
fs/btrfs/bio.c
141
bbio->bio.bi_status = READ_ONCE(bbio->status);
fs/btrfs/bio.c
171
btrfs_bio_end_io(fbio->bbio, fbio->bbio->bio.bi_status);
fs/btrfs/bio.c
198
btrfs_bio_for_each_block(paddr, &repair_bbio->bio, &saved_iter, step) {
fs/btrfs/bio.c
204
if (repair_bbio->bio.bi_status ||
fs/btrfs/bio.c
206
bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ);
fs/btrfs/bio.c
207
repair_bbio->bio.bi_iter = repair_bbio->saved_iter;
fs/btrfs/bio.c
212
fbio->bbio->bio.bi_status = BLK_STS_IOERR;
fs/btrfs/bio.c
229
bio_put(&repair_bbio->bio);
fs/btrfs/bio.c
256
struct bio *repair_bio;
fs/btrfs/bio.c
266
failed_bbio->bio.bi_status = BLK_STS_IOERR;
fs/btrfs/bio.c
310
blk_status_t status = bbio->bio.bi_status;
fs/btrfs/bio.c
323
if (bbio->bio.bi_pool == &btrfs_repair_bioset) {
fs/btrfs/bio.c
329
bbio->bio.bi_status = BLK_STS_OK;
fs/btrfs/bio.c
331
btrfs_bio_for_each_block(paddr, &bbio->bio, iter, step) {
fs/btrfs/bio.c
348
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
fs/btrfs/bio.c
351
static void btrfs_log_dev_io_error(const struct bio *bio, struct btrfs_device *dev)
fs/btrfs/bio.c
355
if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET)
fs/btrfs/bio.c
358
if (btrfs_op(bio) == BTRFS_MAP_WRITE)
fs/btrfs/bio.c
360
else if (!(bio->bi_opf & REQ_RAHEAD))
fs/btrfs/bio.c
362
if (bio->bi_opf & REQ_PREFLUSH)
fs/btrfs/bio.c
367
const struct bio *bio)
fs/btrfs/bio.c
369
if (bio->bi_opf & REQ_META)
fs/btrfs/bio.c
37
return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE;
fs/btrfs/bio.c
377
struct bio *bio = &bbio->bio;
fs/btrfs/bio.c
379
if (bio_op(bio) == REQ_OP_READ) {
fs/btrfs/bio.c
382
return btrfs_check_read_bio(bbio, bbio->bio.bi_private);
fs/btrfs/bio.c
383
return btrfs_bio_end_io(bbio, bbio->bio.bi_status);
fs/btrfs/bio.c
385
if (bio_is_zone_append(bio) && !bio->bi_status)
fs/btrfs/bio.c
387
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
fs/btrfs/bio.c
390
static void btrfs_simple_end_io(struct bio *bio)
fs/btrfs/bio.c
392
struct btrfs_bio *bbio = btrfs_bio(bio);
fs/btrfs/bio.c
393
struct btrfs_device *dev = bio->bi_private;
fs/btrfs/bio.c
398
if (bio->bi_status)
fs/btrfs/bio.c
399
btrfs_log_dev_io_error(bio, dev);
fs/btrfs/bio.c
402
queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
fs/btrfs/bio.c
405
static void btrfs_raid56_end_io(struct bio *bio)
fs/btrfs/bio.c
407
struct btrfs_io_context *bioc = bio->bi_private;
fs/btrfs/bio.c
408
struct btrfs_bio *bbio = btrfs_bio(bio);
fs/btrfs/bio.c
415
if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio))
fs/btrfs/bio.c
418
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
fs/btrfs/bio.c
426
struct bio *bio = &bbio->bio;
fs/btrfs/bio.c
427
struct btrfs_io_stripe *stripe = bio->bi_private;
fs/btrfs/bio.c
432
if (bio->bi_status) {
fs/btrfs/bio.c
434
btrfs_log_dev_io_error(bio, stripe->dev);
fs/btrfs/bio.c
442
bio->bi_status = BLK_STS_IOERR;
fs/btrfs/bio.c
444
bio->bi_status = BLK_STS_OK;
fs/btrfs/bio.c
446
if (bio_is_zone_append(bio) && !bio->bi_status)
fs/btrfs/bio.c
447
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/bio.c
449
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
fs/btrfs/bio.c
453
static void btrfs_orig_write_end_io(struct bio *bio)
fs/btrfs/bio.c
455
struct btrfs_bio *bbio = btrfs_bio(bio);
fs/btrfs/bio.c
458
queue_work(btrfs_end_io_wq(bbio->inode->root->fs_info, bio), &bbio->end_io_work);
fs/btrfs/bio.c
464
struct bio *bio = &bbio->bio;
fs/btrfs/bio.c
465
struct btrfs_io_stripe *stripe = bio->bi_private;
fs/btrfs/bio.c
467
if (bio->bi_status) {
fs/btrfs/bio.c
469
btrfs_log_dev_io_error(bio, stripe->dev);
fs/btrfs/bio.c
470
} else if (bio_is_zone_append(bio)) {
fs/btrfs/bio.c
471
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/bio.c
476
bio_put(bio);
fs/btrfs/bio.c
479
static void btrfs_clone_write_end_io(struct bio *bio)
fs/btrfs/bio.c
481
struct btrfs_bio *bbio = btrfs_bio(bio);
fs/btrfs/bio.c
484
queue_work(btrfs_end_io_wq(bbio->inode->root->fs_info, bio), &bbio->end_io_work);
fs/btrfs/bio.c
487
static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
fs/btrfs/bio.c
489
u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/bio.c
493
(btrfs_op(bio) == BTRFS_MAP_WRITE &&
fs/btrfs/bio.c
495
bio_io_error(bio);
fs/btrfs/bio.c
499
bio_set_dev(bio, dev->bdev);
fs/btrfs/bio.c
50
memset(bbio, 0, offsetof(struct btrfs_bio, bio));
fs/btrfs/bio.c
505
if (btrfs_bio(bio)->can_use_append && btrfs_dev_is_sequential(dev, physical)) {
fs/btrfs/bio.c
509
bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
fs/btrfs/bio.c
510
bio->bi_opf &= ~REQ_OP_WRITE;
fs/btrfs/bio.c
511
bio->bi_opf |= REQ_OP_ZONE_APPEND;
fs/btrfs/bio.c
515
__func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
fs/btrfs/bio.c
517
dev->devid, bio->bi_iter.bi_size);
fs/btrfs/bio.c
523
if (dev->fs_devices->collect_fs_stats && bio_op(bio) == REQ_OP_READ && dev->fs_info)
fs/btrfs/bio.c
525
bio->bi_iter.bi_size >> dev->fs_info->sectorsize_bits);
fs/btrfs/bio.c
527
if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
fs/btrfs/bio.c
528
blkcg_punt_bio_submit(bio);
fs/btrfs/bio.c
530
submit_bio(bio);
fs/btrfs/bio.c
535
struct bio *orig_bio = bioc->orig_bio, *bio;
fs/btrfs/bio.c
542
bio = orig_bio;
fs/btrfs/bio.c
543
bio->bi_end_io = btrfs_orig_write_end_io;
fs/btrfs/bio.c
546
bio = bio_alloc_clone(NULL, orig_bio, GFP_NOFS, &btrfs_bioset);
fs/btrfs/bio.c
548
btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode,
fs/btrfs/bio.c
550
bio->bi_end_io = btrfs_clone_write_end_io;
fs/btrfs/bio.c
553
bio->bi_private = &bioc->stripes[dev_nr];
fs/btrfs/bio.c
554
bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
fs/btrfs/bio.c
556
bioc->size = bio->bi_iter.bi_size;
fs/btrfs/bio.c
557
btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
fs/btrfs/bio.c
560
static void btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
fs/btrfs/bio.c
565
btrfs_bio(bio)->mirror_num = mirror_num;
fs/btrfs/bio.c
566
bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
fs/btrfs/bio.c
567
if (bio_op(bio) != REQ_OP_READ)
fs/btrfs/bio.c
568
btrfs_bio(bio)->orig_physical = smap->physical;
fs/btrfs/bio.c
569
bio->bi_private = smap->dev;
fs/btrfs/bio.c
570
bio->bi_end_io = btrfs_simple_end_io;
fs/btrfs/bio.c
571
btrfs_submit_dev_bio(smap->dev, bio);
fs/btrfs/bio.c
574
bio->bi_private = bioc;
fs/btrfs/bio.c
575
bio->bi_end_io = btrfs_raid56_end_io;
fs/btrfs/bio.c
576
if (bio_op(bio) == REQ_OP_READ)
fs/btrfs/bio.c
577
raid56_parity_recover(bio, bioc, mirror_num);
fs/btrfs/bio.c
579
raid56_parity_write(bio, bioc);
fs/btrfs/bio.c
584
bioc->orig_bio = bio;
fs/btrfs/bio.c
592
if (bbio->bio.bi_opf & REQ_META)
fs/btrfs/bio.c
629
async->bbio->bio.bi_status = errno_to_blk_status(ret);
fs/btrfs/bio.c
646
struct bio *bio = &async->bbio->bio;
fs/btrfs/bio.c
654
if (bio->bi_status) {
fs/btrfs/bio.c
655
btrfs_bio_end_io(async->bbio, bio->bi_status);
fs/btrfs/bio.c
664
bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
fs/btrfs/bio.c
665
btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
fs/btrfs/bio.c
690
if (op_is_sync(bbio->bio.bi_opf))
fs/btrfs/bio.c
694
if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(fs_info))
fs/btrfs/bio.c
71
struct bio *bio;
fs/btrfs/bio.c
73
bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
fs/btrfs/bio.c
733
sector_offset = bio_split_rw_at(&bbio->bio, &fs_info->limits,
fs/btrfs/bio.c
74
bbio = btrfs_bio(bio);
fs/btrfs/bio.c
750
struct bio *bio = &bbio->bio;
fs/btrfs/bio.c
751
u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/bio.c
752
u64 length = bio->bi_iter.bi_size;
fs/btrfs/bio.c
765
ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
fs/btrfs/bio.c
778
if (bio_op(bio) == REQ_OP_WRITE && is_data_bbio(bbio))
fs/btrfs/bio.c
797
bio = &bbio->bio;
fs/btrfs/bio.c
804
if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio)) {
fs/btrfs/bio.c
805
bbio->saved_iter = bio->bi_iter;
fs/btrfs/bio.c
812
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
fs/btrfs/bio.c
84
struct bio *bio;
fs/btrfs/bio.c
848
btrfs_submit_bio(bio, bioc, &smap, mirror_num);
fs/btrfs/bio.c
86
bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
fs/btrfs/bio.c
861
ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
fs/btrfs/bio.c
88
if (IS_ERR(bio))
fs/btrfs/bio.c
880
const u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/bio.c
881
const u32 length = bbio->bio.bi_iter.bi_size;
fs/btrfs/bio.c
889
bio_for_each_bvec(bvec, &bbio->bio, iter)
fs/btrfs/bio.c
89
return ERR_CAST(bio);
fs/btrfs/bio.c
91
bbio = btrfs_bio(bio);
fs/btrfs/bio.c
934
struct bio *bio = NULL;
fs/btrfs/bio.c
974
bio = bio_alloc(smap.dev->bdev, nr_steps, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
fs/btrfs/bio.c
975
bio->bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
fs/btrfs/bio.c
977
ret = bio_add_page(bio, phys_to_page(paddrs[i]), step, offset_in_page(paddrs[i]));
fs/btrfs/bio.c
981
ret = submit_bio_wait(bio);
fs/btrfs/bio.c
982
bio_put(bio);
fs/btrfs/bio.h
106
struct bio bio;
fs/btrfs/bio.h
109
static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
fs/btrfs/bio.h
111
return container_of(bio, struct btrfs_bio, bio);
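
Note: btrfs_bio above embeds its struct bio as the last member, reserves the wrapper as bioset front padding (the offsetof(struct btrfs_bio, bio) arguments in bio.c), and recovers the wrapper with container_of(). The same pattern, sketched with a hypothetical demo_bio wrapper:

struct demo_bio {
        u64 private_state;      /* driver data lives in the front pad */
        struct bio bio;         /* must be the last member */
};

static struct bio_set demo_bioset;

static int __init demo_bioset_setup(void)
{
        return bioset_init(&demo_bioset, BIO_POOL_SIZE,
                           offsetof(struct demo_bio, bio), 0);
}

static inline struct demo_bio *demo_bio(struct bio *bio)
{
        return container_of(bio, struct demo_bio, bio);
}
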
fs/btrfs/compression.c
1070
zero_fill_bio(&cb->orig_bbio->bio);
fs/btrfs/compression.c
1136
offsetof(struct compressed_bio, bbio.bio),
fs/btrfs/compression.c
1216
struct bio *orig_bio = &cb->orig_bbio->bio;
fs/btrfs/compression.c
234
blk_status_t status = bbio->bio.bi_status;
fs/btrfs/compression.c
241
bio_for_each_folio_all(fi, &bbio->bio)
fs/btrfs/compression.c
243
bio_put(&bbio->bio);
fs/btrfs/compression.c
260
ret = blk_status_to_errno(cb->bbio.bio.bi_status);
fs/btrfs/compression.c
296
cb->bbio.bio.bi_status == BLK_STS_OK);
fs/btrfs/compression.c
301
bio_for_each_folio_all(fi, &bbio->bio)
fs/btrfs/compression.c
303
bio_put(&cb->bbio.bio);
fs/btrfs/compression.c
332
ASSERT(cb->bbio.bio.bi_iter.bi_size == ordered->disk_num_bytes);
fs/btrfs/compression.c
334
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
fs/btrfs/compression.c
376
struct bio *orig_bio = &cb->orig_bbio->bio;
fs/btrfs/compression.c
562
cb->len = bbio->bio.bi_iter.bi_size;
fs/btrfs/compression.c
580
ret = bio_add_folio(&cb->bbio.bio, folio, cur_len, 0);
fs/btrfs/compression.c
587
ASSERT(cb->bbio.bio.bi_iter.bi_size == compressed_len);
fs/btrfs/compression.c
592
cb->len = bbio->bio.bi_iter.bi_size;
fs/btrfs/compression.c
593
cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
fs/btrfs/compression.h
146
struct bio *bio = &cb->bbio.bio;
fs/btrfs/compression.h
149
bio_for_each_folio_all(fi, bio)
fs/btrfs/compression.h
151
bio_put(bio);
fs/btrfs/direct-io.c
1095
offsetof(struct btrfs_dio_private, bbio.bio),
fs/btrfs/direct-io.c
650
struct bio *bio = &bbio->bio;
fs/btrfs/direct-io.c
652
if (bio->bi_status) {
fs/btrfs/direct-io.c
655
btrfs_ino(inode), bio->bi_opf,
fs/btrfs/direct-io.c
656
dip->file_offset, dip->bytes, bio->bi_status);
fs/btrfs/direct-io.c
659
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
fs/btrfs/direct-io.c
662
!bio->bi_status);
fs/btrfs/direct-io.c
668
bbio->bio.bi_private = bbio->private;
fs/btrfs/direct-io.c
669
iomap_dio_bio_end_io(bio);
fs/btrfs/direct-io.c
675
u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/direct-io.c
676
u64 len = bbio->bio.bi_iter.bi_size;
fs/btrfs/direct-io.c
710
static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
fs/btrfs/direct-io.c
713
struct btrfs_bio *bbio = btrfs_bio(bio);
fs/btrfs/direct-io.c
719
btrfs_dio_end_io, bio->bi_private);
fs/btrfs/direct-io.c
722
dip->bytes = bio->bi_iter.bi_size;
fs/btrfs/direct-io.c
724
dio_data->submitted += bio->bi_iter.bi_size;
fs/btrfs/direct-io.c
741
bio->bi_status = errno_to_blk_status(ret);
fs/btrfs/direct-io.c
742
iomap_dio_bio_end_io(bio);
fs/btrfs/disk-io.c
267
if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
fs/btrfs/disk-io.c
3738
static void btrfs_end_super_write(struct bio *bio)
fs/btrfs/disk-io.c
3740
struct btrfs_device *device = bio->bi_private;
fs/btrfs/disk-io.c
3743
bio_for_each_folio_all(fi, bio) {
fs/btrfs/disk-io.c
3744
if (bio->bi_status) {
fs/btrfs/disk-io.c
3748
blk_status_to_errno(bio->bi_status));
fs/btrfs/disk-io.c
3752
if (bio->bi_opf & REQ_FUA)
fs/btrfs/disk-io.c
3762
bio_put(bio);
fs/btrfs/disk-io.c
3791
struct bio *bio;
fs/btrfs/disk-io.c
3835
bio = bio_alloc(device->bdev, 1,
fs/btrfs/disk-io.c
3838
bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
fs/btrfs/disk-io.c
3839
bio->bi_private = device;
fs/btrfs/disk-io.c
3840
bio->bi_end_io = btrfs_end_super_write;
fs/btrfs/disk-io.c
3841
bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
fs/btrfs/disk-io.c
3849
bio->bi_opf |= REQ_FUA;
fs/btrfs/disk-io.c
3850
submit_bio(bio);
fs/btrfs/disk-io.c
3919
static void btrfs_end_empty_barrier(struct bio *bio)
fs/btrfs/disk-io.c
3921
bio_uninit(bio);
fs/btrfs/disk-io.c
3922
complete(bio->bi_private);
fs/btrfs/disk-io.c
3931
struct bio *bio = &device->flush_bio;
fs/btrfs/disk-io.c
3935
bio_init(bio, device->bdev, NULL, 0,
fs/btrfs/disk-io.c
3937
bio->bi_end_io = btrfs_end_empty_barrier;
fs/btrfs/disk-io.c
3939
bio->bi_private = &device->flush_wait;
fs/btrfs/disk-io.c
3940
submit_bio(bio);
fs/btrfs/disk-io.c
3950
struct bio *bio = &device->flush_bio;
fs/btrfs/disk-io.c
3957
if (bio->bi_status) {
fs/btrfs/extent_io.c
166
if (!(btrfs_op(&bbio->bio) == BTRFS_MAP_READ && is_data_inode(bbio->inode)))
fs/btrfs/extent_io.c
182
ASSERT(bbio->bio.bi_iter.bi_size);
fs/btrfs/extent_io.c
186
if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
fs/btrfs/extent_io.c
2203
if (bbio->bio.bi_status != BLK_STS_OK)
fs/btrfs/extent_io.c
2206
bio_for_each_folio_all(fi, &bbio->bio) {
fs/btrfs/extent_io.c
2212
bio_put(&bbio->bio);
fs/btrfs/extent_io.c
2255
bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
fs/btrfs/extent_io.c
2256
bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
fs/btrfs/extent_io.c
2257
wbc_init_bio(wbc, &bbio->bio);
fs/btrfs/extent_io.c
2269
bio_add_folio_nofail(&bbio->bio, folio, range_len,
fs/btrfs/extent_io.c
3842
bool uptodate = !bbio->bio.bi_status;
fs/btrfs/extent_io.c
3865
bio_put(&bbio->bio);
fs/btrfs/extent_io.c
3907
bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
fs/btrfs/extent_io.c
3915
bio_add_folio_nofail(&bbio->bio, folio, range_len,
fs/btrfs/extent_io.c
520
struct bio *bio = &bbio->bio;
fs/btrfs/extent_io.c
521
int error = blk_status_to_errno(bio->bi_status);
fs/btrfs/extent_io.c
525
ASSERT(!bio_flagged(bio, BIO_CLONED));
fs/btrfs/extent_io.c
526
bio_for_each_folio_all(fi, bio) {
fs/btrfs/extent_io.c
548
bio_put(bio);
fs/btrfs/extent_io.c
577
struct bio *bio = &bbio->bio;
fs/btrfs/extent_io.c
581
ASSERT(!bio_flagged(bio, BIO_CLONED));
fs/btrfs/extent_io.c
586
bio_for_each_folio_all(fi, &bbio->bio) {
fs/btrfs/extent_io.c
587
bool uptodate = !bio->bi_status;
fs/btrfs/extent_io.c
593
__func__, bio->bi_iter.bi_sector, bio->bi_status,
fs/btrfs/extent_io.c
625
bio_put(bio);
fs/btrfs/extent_io.c
719
struct bio *bio = &bio_ctrl->bbio->bio;
fs/btrfs/extent_io.c
727
return bio->bi_iter.bi_sector == sector;
fs/btrfs/extent_io.c
735
bio_end_sector(bio) == sector;
fs/btrfs/extent_io.c
747
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
fs/btrfs/extent_io.c
748
bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
fs/btrfs/extent_io.c
771
bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
fs/btrfs/extent_io.c
772
wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
fs/btrfs/extent_io.c
819
if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
fs/btrfs/file-item.c
350
struct bio *bio = &bbio->bio;
fs/btrfs/file-item.c
354
u32 orig_len = bio->bi_iter.bi_size;
fs/btrfs/file-item.c
355
u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/file-item.c
376
ASSERT(bio_op(bio) == REQ_OP_READ);
fs/btrfs/file-item.c
778
struct bio *bio = &bbio->bio;
fs/btrfs/file-item.c
789
btrfs_bio_for_each_block(paddr, bio, &iter, step) {
fs/btrfs/file-item.c
804
ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
fs/btrfs/file-item.c
818
struct bio *bio = &bbio->bio;
fs/btrfs/file-item.c
823
sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
fs/btrfs/file-item.c
831
sums->len = bio->bi_iter.bi_size;
fs/btrfs/file-item.c
837
csum_one_bio(bbio, &bbio->bio.bi_iter);
fs/btrfs/file-item.c
842
bbio->csum_saved_iter = bbio->bio.bi_iter;
fs/btrfs/file-item.c
858
bbio->sums->len = bbio->bio.bi_iter.bi_size;
fs/btrfs/file-item.c
859
bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/inode.c
10004
ret = bio_add_folio(&cb->bbio.bio, folio, round_up(bytes, blocksize), 0);
fs/btrfs/inode.c
10011
ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
fs/btrfs/inode.c
10054
bio_first_folio_all(&cb->bbio.bio),
fs/btrfs/inode.c
1030
total_compressed = cb->bbio.bio.bi_iter.bi_size;
fs/btrfs/inode.c
1056
bio_first_folio_all(&cb->bbio.bio), false);
fs/btrfs/inode.c
1173
compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
fs/btrfs/inode.c
1200
async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;
fs/btrfs/inode.c
866
struct bio *bio = &cb->bbio.bio;
fs/btrfs/inode.c
881
bio_for_each_folio_all(fi, bio)
fs/btrfs/inode.c
886
ASSERT(bio->bi_vcnt);
fs/btrfs/inode.c
888
bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
fs/btrfs/inode.c
895
struct bio *bio = &cb->bbio.bio;
fs/btrfs/inode.c
897
const u32 bio_size = bio->bi_iter.bi_size;
fs/btrfs/inode.c
905
struct bio *bio = &cb->bbio.bio;
fs/btrfs/inode.c
907
const u32 bio_size = bio->bi_iter.bi_size;
fs/btrfs/inode.c
914
ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
fs/btrfs/inode.c
9551
if (bbio->bio.bi_status) {
fs/btrfs/inode.c
9559
WRITE_ONCE(priv->status, bbio->bio.bi_status);
fs/btrfs/inode.c
9571
bio_put(&bbio->bio);
fs/btrfs/inode.c
9604
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
fs/btrfs/inode.c
9609
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
fs/btrfs/inode.c
9615
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
fs/btrfs/lzo.c
134
static int write_and_queue_folio(struct bio *out_bio, struct folio **out_folio,
fs/btrfs/lzo.c
193
struct bio *out_bio,
fs/btrfs/lzo.c
281
struct bio *bio = &cb->bbio.bio;
fs/btrfs/lzo.c
296
ASSERT(bio->bi_iter.bi_size == 0);
fs/btrfs/lzo.c
304
ret = write_and_queue_folio(bio, &folio_out, &total_out, LZO_LEN);
fs/btrfs/lzo.c
335
ret = copy_compressed_data_to_bio(fs_info, bio, workspace->cbuf, out_len,
fs/btrfs/lzo.c
364
sizes_ptr = kmap_local_folio(bio_first_folio_all(bio), 0);
fs/btrfs/lzo.c
396
bio_next_folio(fi, &cb->bbio.bio);
fs/btrfs/lzo.c
446
bio_first_folio(&fi, &cb->bbio.bio, 0);
fs/btrfs/misc.h
31
static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
fs/btrfs/misc.h
33
struct bio_vec bv = bio_iter_iovec(bio, *iter);
fs/btrfs/misc.h
50
#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize) \
fs/btrfs/misc.h
52
(paddr = bio_iter_phys((bio), (iter)), 1); \
fs/btrfs/misc.h
53
bio_advance_iter_single((bio), (iter), (blocksize)))
fs/btrfs/misc.h
56
static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
fs/btrfs/misc.h
62
bio_for_each_bvec_all(bvec, bio, i)
fs/btrfs/misc.h
73
#define btrfs_bio_for_each_block_all(paddr, bio, blocksize) \
fs/btrfs/misc.h
74
for (struct bvec_iter iter = init_bvec_iter_for_bio(bio); \
fs/btrfs/misc.h
76
(paddr = bio_iter_phys((bio), &(iter)), 1); \
fs/btrfs/misc.h
77
bio_advance_iter_single((bio), &(iter), (blocksize)))
fs/btrfs/raid56.c
1208
static int bio_add_paddrs(struct bio *bio, phys_addr_t *paddrs, unsigned int nr_steps,
fs/btrfs/raid56.c
1215
ret = bio_add_page(bio, phys_to_page(paddrs[i]), step,
fs/btrfs/raid56.c
1227
bio->bi_iter.bi_size -= added;
fs/btrfs/raid56.c
1243
struct bio *last = bio_list->tail;
fs/btrfs/raid56.c
1245
struct bio *bio;
fs/btrfs/raid56.c
1296
bio = bio_alloc(stripe->dev->bdev,
fs/btrfs/raid56.c
1299
bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
fs/btrfs/raid56.c
1300
bio->bi_private = rbio;
fs/btrfs/raid56.c
1302
ret = bio_add_paddrs(bio, paddrs, rbio->sector_nsteps, step);
fs/btrfs/raid56.c
1304
bio_list_add(bio_list, bio);
fs/btrfs/raid56.c
1308
static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
fs/btrfs/raid56.c
1313
struct bvec_iter iter = bio->bi_iter;
fs/btrfs/raid56.c
1315
u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
fs/btrfs/raid56.c
1318
btrfs_bio_for_each_block(paddr, bio, &iter, step) {
fs/btrfs/raid56.c
1336
struct bio *bio;
fs/btrfs/raid56.c
1339
bio_list_for_each(bio, &rbio->bio_list)
fs/btrfs/raid56.c
1340
index_one_bio(rbio, bio);
fs/btrfs/raid56.c
1345
static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
fs/btrfs/raid56.c
1354
if (!bio->bi_bdev)
fs/btrfs/raid56.c
1358
if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
fs/btrfs/raid56.c
1362
trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
fs/btrfs/raid56.c
1375
struct bio *bio;
fs/btrfs/raid56.c
1377
while ((bio = bio_list_pop(bio_list)))
fs/btrfs/raid56.c
1378
bio_put(bio);
fs/btrfs/raid56.c
1564
static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
fs/btrfs/raid56.c
1567
u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
fs/btrfs/raid56.c
1574
bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
fs/btrfs/raid56.c
1582
if (bio->bi_iter.bi_size == 0) {
fs/btrfs/raid56.c
1616
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
fs/btrfs/raid56.c
1623
ASSERT(!bio_flagged(bio, BIO_CLONED));
fs/btrfs/raid56.c
1625
btrfs_bio_for_each_block_all(paddr, bio, step) {
fs/btrfs/raid56.c
1638
static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
fs/btrfs/raid56.c
1640
phys_addr_t bvec_paddr = bvec_phys(bio_first_bvec_all(bio));
fs/btrfs/raid56.c
1653
static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
fs/btrfs/raid56.c
1655
int total_sector_nr = get_bio_sector_nr(rbio, bio);
fs/btrfs/raid56.c
1660
bio_for_each_bvec_all(bvec, bio, i)
fs/btrfs/raid56.c
1676
struct bio *bio)
fs/btrfs/raid56.c
1681
int total_sector_nr = get_bio_sector_nr(rbio, bio);
fs/btrfs/raid56.c
1694
btrfs_bio_for_each_block_all(paddr, bio, step) {
fs/btrfs/raid56.c
1717
static void raid_wait_read_end_io(struct bio *bio)
fs/btrfs/raid56.c
1719
struct btrfs_raid_bio *rbio = bio->bi_private;
fs/btrfs/raid56.c
1721
if (bio->bi_status) {
fs/btrfs/raid56.c
1722
rbio_update_error_bitmap(rbio, bio);
fs/btrfs/raid56.c
1724
set_bio_pages_uptodate(rbio, bio);
fs/btrfs/raid56.c
1725
verify_bio_data_sectors(rbio, bio);
fs/btrfs/raid56.c
1728
bio_put(bio);
fs/btrfs/raid56.c
1736
struct bio *bio;
fs/btrfs/raid56.c
1739
while ((bio = bio_list_pop(bio_list))) {
fs/btrfs/raid56.c
1740
bio->bi_end_io = raid_wait_read_end_io;
fs/btrfs/raid56.c
1745
bio_get_trace_info(rbio, bio, &trace_info);
fs/btrfs/raid56.c
1746
trace_raid56_read(rbio, bio, &trace_info);
fs/btrfs/raid56.c
1748
submit_bio(bio);
fs/btrfs/raid56.c
1834
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
fs/btrfs/raid56.c
1864
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
fs/btrfs/raid56.c
1873
bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
fs/btrfs/raid56.c
1874
bio_endio(bio);
fs/btrfs/raid56.c
1878
rbio_add_bio(rbio, bio);
fs/btrfs/raid56.c
2268
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
fs/btrfs/raid56.c
2276
bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
fs/btrfs/raid56.c
2277
bio_endio(bio);
fs/btrfs/raid56.c
2282
rbio_add_bio(rbio, bio);
fs/btrfs/raid56.c
2284
set_rbio_range_error(rbio, bio);
fs/btrfs/raid56.c
2407
static void raid_wait_write_end_io(struct bio *bio)
fs/btrfs/raid56.c
2409
struct btrfs_raid_bio *rbio = bio->bi_private;
fs/btrfs/raid56.c
2411
if (bio->bi_status)
fs/btrfs/raid56.c
2412
rbio_update_error_bitmap(rbio, bio);
fs/btrfs/raid56.c
2413
bio_put(bio);
fs/btrfs/raid56.c
2421
struct bio *bio;
fs/btrfs/raid56.c
2424
while ((bio = bio_list_pop(bio_list))) {
fs/btrfs/raid56.c
2425
bio->bi_end_io = raid_wait_write_end_io;
fs/btrfs/raid56.c
2430
bio_get_trace_info(rbio, bio, &trace_info);
fs/btrfs/raid56.c
2431
trace_raid56_write(rbio, bio, &trace_info);
fs/btrfs/raid56.c
2433
submit_bio(bio);
fs/btrfs/raid56.c
2569
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
fs/btrfs/raid56.c
2581
bio_list_add(&rbio->bio_list, bio);
fs/btrfs/raid56.c
2586
ASSERT(!bio->bi_iter.bi_size);
fs/btrfs/raid56.c
947
static void rbio_endio_bio_list(struct bio *cur, blk_status_t status)
fs/btrfs/raid56.c
949
struct bio *next;
fs/btrfs/raid56.c
966
struct bio *cur = bio_list_get(&rbio->bio_list);
fs/btrfs/raid56.c
967
struct bio *extra;
fs/btrfs/raid56.h
275
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
fs/btrfs/raid56.h
277
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);
fs/btrfs/raid56.h
279
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
fs/btrfs/relocation.c
3980
if (bbio->bio.bi_status)
fs/btrfs/relocation.c
3981
WRITE_ONCE(priv->status, bbio->bio.bi_status);
fs/btrfs/relocation.c
3986
bio_put(&bbio->bio);
fs/btrfs/relocation.c
4003
bbio->bio.bi_iter.bi_sector = (addr >> SECTOR_SHIFT);
fs/btrfs/relocation.c
4010
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
fs/btrfs/relocation.c
4017
bbio->bio.bi_iter.bi_sector = (addr >> SECTOR_SHIFT);
fs/btrfs/scrub.c
1253
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
fs/btrfs/scrub.c
1259
bio_for_each_bvec_all(bvec, &bbio->bio, i)
fs/btrfs/scrub.c
1263
if (bbio->bio.bi_status) {
fs/btrfs/scrub.c
1269
bio_put(&bbio->bio);
fs/btrfs/scrub.c
1282
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
fs/btrfs/scrub.c
1286
bio_for_each_bvec_all(bvec, &bbio->bio, i)
fs/btrfs/scrub.c
1289
if (bbio->bio.bi_status) {
fs/btrfs/scrub.c
1300
bio_put(&bbio->bio);
fs/btrfs/scrub.c
1311
u32 bio_len = bbio->bio.bi_iter.bi_size;
fs/btrfs/scrub.c
1312
u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
fs/btrfs/scrub.c
1821
bbio->bio.bi_iter.bi_size >= stripe_len)) {
fs/btrfs/scrub.c
1822
ASSERT(bbio->bio.bi_iter.bi_size);
fs/btrfs/scrub.c
1868
ASSERT(bbio->bio.bi_iter.bi_size);
fs/btrfs/scrub.c
2031
static void raid56_scrub_wait_endio(struct bio *bio)
fs/btrfs/scrub.c
2033
complete(bio->bi_private);
fs/btrfs/scrub.c
2126
struct bio bio;
fs/btrfs/scrub.c
2131
bio_init(&bio, NULL, NULL, 0, REQ_OP_READ);
fs/btrfs/scrub.c
2132
bio.bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
fs/btrfs/scrub.c
2133
bio.bi_private = &io_done;
fs/btrfs/scrub.c
2134
bio.bi_end_io = raid56_scrub_wait_endio;
fs/btrfs/scrub.c
2143
rbio = raid56_parity_alloc_scrub_rbio(&bio, bioc, scrub_dev, extent_bitmap,
fs/btrfs/scrub.c
2159
ret = blk_status_to_errno(bio.bi_status);
fs/btrfs/scrub.c
2162
bio_uninit(&bio);
fs/btrfs/scrub.c
895
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
fs/btrfs/scrub.c
901
bio_for_each_bvec_all(bvec, &bbio->bio, i)
fs/btrfs/scrub.c
904
if (bbio->bio.bi_status) {
fs/btrfs/scrub.c
913
bio_put(&bbio->bio);
fs/btrfs/scrub.c
931
ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), fs_info->sectorsize,
fs/btrfs/scrub.c
954
bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
fs/btrfs/scrub.c
973
bbio->bio.bi_iter.bi_size >= blocksize)) {
fs/btrfs/scrub.c
974
ASSERT(bbio->bio.bi_iter.bi_size);
fs/btrfs/scrub.c
990
ASSERT(bbio->bio.bi_iter.bi_size);
fs/btrfs/volumes.h
188
struct bio flush_bio;
fs/btrfs/volumes.h
508
struct bio *orig_bio;
fs/btrfs/volumes.h
676
static inline enum btrfs_map_op btrfs_op(const struct bio *bio)
fs/btrfs/volumes.h
678
switch (bio_op(bio)) {
fs/btrfs/zlib.c
154
struct bio *bio = &cb->bbio.bio;
fs/btrfs/zlib.c
256
if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
fs/btrfs/zlib.c
294
if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
fs/btrfs/zlib.c
310
if (workspace->strm.total_out > bio->bi_iter.bi_size) {
fs/btrfs/zlib.c
311
const u32 cur_len = workspace->strm.total_out - bio->bi_iter.bi_size;
fs/btrfs/zlib.c
315
if (!bio_add_folio(bio, out_folio, cur_len, 0)) {
fs/btrfs/zlib.c
324
ASSERT(bio->bi_iter.bi_size == workspace->strm.total_out);
fs/btrfs/zlib.c
357
bio_first_folio(&fi, &cb->bbio.bio, 0);
fs/btrfs/zlib.c
419
bio_next_folio(&fi, &cb->bbio.bio);
fs/btrfs/zoned.c
2028
u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
fs/btrfs/zoned.c
2040
if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
fs/btrfs/zoned.c
2067
const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/btrfs/zstd.c
405
struct bio *bio = &cb->bbio.bio;
fs/btrfs/zstd.c
480
if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
fs/btrfs/zstd.c
542
if (!bio_add_folio(bio, out_folio, workspace->out_buf.pos, 0)) {
fs/btrfs/zstd.c
554
if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
fs/btrfs/zstd.c
574
ASSERT(tot_out == bio->bi_iter.bi_size);
fs/btrfs/zstd.c
600
bio_first_folio(&fi, &cb->bbio.bio, 0);
fs/btrfs/zstd.c
665
bio_next_folio(&fi, &cb->bbio.bio);
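
Nearly every fs/btrfs hit reaches the bio as &bbio->bio or &cb->bbio.bio: the bio sits at the tail of a filesystem-private structure allocated from a dedicated bioset, and completion paths recover the container. A minimal sketch of that embedding, with my_bio/my_end_io as illustrative names:

#include <linux/bio.h>

struct my_bio {
        int             saved_errno;
        struct bio      bio;    /* tail member; bioset front_pad covers the rest */
};

static void my_end_io(struct bio *bio)
{
        struct my_bio *mbio = container_of(bio, struct my_bio, bio);

        mbio->saved_errno = blk_status_to_errno(bio->bi_status);
        /* assuming the container came from bio_alloc_bioset() with front_pad
         * sized for it, this final put frees the whole container: */
        bio_put(&mbio->bio);
}
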
fs/buffer.c
2766
static void end_bio_bh_io_sync(struct bio *bio)
fs/buffer.c
2768
struct buffer_head *bh = bio->bi_private;
fs/buffer.c
2770
if (unlikely(bio_flagged(bio, BIO_QUIET)))
fs/buffer.c
2773
bh->b_end_io(bh, !bio->bi_status);
fs/buffer.c
2774
bio_put(bio);
fs/buffer.c
2782
struct bio *bio;
fs/buffer.c
2801
bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
fs/buffer.c
2803
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
fs/buffer.c
2805
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
fs/buffer.c
2806
bio->bi_write_hint = write_hint;
fs/buffer.c
2808
bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
fs/buffer.c
2810
bio->bi_end_io = end_bio_bh_io_sync;
fs/buffer.c
2811
bio->bi_private = bh;
fs/buffer.c
2814
guard_bio_eod(bio);
fs/buffer.c
2817
wbc_init_bio(wbc, bio);
fs/buffer.c
2821
blk_crypto_submit_bio(bio);
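
The fs/buffer.c entries trace the classic one-buffer-per-bio path: allocate, aim at the buffer_head's folio, pass the bh through bi_private, and bounce completion back via b_end_io. A condensed sketch (the sketch_* names are illustrative):

#include <linux/bio.h>
#include <linux/buffer_head.h>

static void sketch_end_bh(struct bio *bio)
{
        struct buffer_head *bh = bio->bi_private;

        bh->b_end_io(bh, !bio->bi_status);      /* zero status == success */
        bio_put(bio);
}

static void sketch_submit_bh(blk_opf_t opf, struct buffer_head *bh)
{
        struct bio *bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);

        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> SECTOR_SHIFT);
        bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
        bio->bi_end_io = sketch_end_bh;
        bio->bi_private = bh;
        submit_bio(bio);
}
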
fs/crypto/bio.c
103
if (!len || !fscrypt_mergeable_bio(bio, inode, lblk))
fs/crypto/bio.c
108
blk_crypto_submit_bio(bio);
fs/crypto/bio.c
149
struct bio *bio;
fs/crypto/bio.c
181
bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
fs/crypto/bio.c
184
bio->bi_iter.bi_sector = sector;
fs/crypto/bio.c
199
ret = bio_add_page(bio, pages[i++], offset, 0);
fs/crypto/bio.c
208
err = submit_bio_wait(bio);
fs/crypto/bio.c
211
bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
fs/crypto/bio.c
215
bio_put(bio);
fs/crypto/bio.c
33
bool fscrypt_decrypt_bio(struct bio *bio)
fs/crypto/bio.c
37
bio_for_each_folio_all(fi, bio) {
fs/crypto/bio.c
42
bio->bi_status = errno_to_blk_status(err);
fs/crypto/bio.c
62
static void fscrypt_zeroout_range_end_io(struct bio *bio)
fs/crypto/bio.c
64
struct fscrypt_zero_done *done = bio->bi_private;
fs/crypto/bio.c
66
if (bio->bi_status)
fs/crypto/bio.c
67
cmpxchg(&done->status, 0, bio->bi_status);
fs/crypto/bio.c
69
bio_put(bio);
fs/crypto/bio.c
84
struct bio *bio;
fs/crypto/bio.c
87
bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
fs/crypto/bio.c
89
bio->bi_iter.bi_sector = sector;
fs/crypto/bio.c
90
bio->bi_private = &done;
fs/crypto/bio.c
91
bio->bi_end_io = fscrypt_zeroout_range_end_io;
fs/crypto/bio.c
92
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
fs/crypto/bio.c
99
__bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
fs/crypto/inline_crypt.c
302
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
fs/crypto/inline_crypt.c
313
bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
fs/crypto/inline_crypt.c
350
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
fs/crypto/inline_crypt.c
358
fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
fs/crypto/inline_crypt.c
383
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
fs/crypto/inline_crypt.c
386
const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
fs/crypto/inline_crypt.c
405
return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
fs/crypto/inline_crypt.c
419
bool fscrypt_mergeable_bio_bh(struct bio *bio,
fs/crypto/inline_crypt.c
426
return !bio->bi_crypt_context;
fs/crypto/inline_crypt.c
428
return fscrypt_mergeable_bio(bio, inode, next_lblk);
fs/direct-io.c
1269
if (sdio.bio)
fs/direct-io.c
135
struct bio *bio_list; /* singly linked via bi_private */
fs/direct-io.c
328
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
fs/direct-io.c
333
static void dio_bio_end_aio(struct bio *bio)
fs/direct-io.c
335
struct dio *dio = bio->bi_private;
fs/direct-io.c
342
dio_bio_complete(dio, bio);
fs/direct-io.c
380
static void dio_bio_end_io(struct bio *bio)
fs/direct-io.c
382
struct dio *dio = bio->bi_private;
fs/direct-io.c
386
bio->bi_private = dio->bio_list;
fs/direct-io.c
387
dio->bio_list = bio;
fs/direct-io.c
398
struct bio *bio;
fs/direct-io.c
404
bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
fs/direct-io.c
405
bio->bi_iter.bi_sector = first_sector;
fs/direct-io.c
407
bio->bi_end_io = dio_bio_end_aio;
fs/direct-io.c
409
bio->bi_end_io = dio_bio_end_io;
fs/direct-io.c
411
bio_set_flag(bio, BIO_PAGE_PINNED);
fs/direct-io.c
412
bio->bi_write_hint = file_inode(dio->iocb->ki_filp)->i_write_hint;
fs/direct-io.c
414
sdio->bio = bio;
fs/direct-io.c
428
struct bio *bio = sdio->bio;
fs/direct-io.c
431
bio->bi_private = dio;
fs/direct-io.c
438
bio_set_pages_dirty(bio);
fs/direct-io.c
440
dio->bio_disk = bio->bi_bdev->bd_disk;
fs/direct-io.c
442
submit_bio(bio);
fs/direct-io.c
444
sdio->bio = NULL;
fs/direct-io.c
466
static struct bio *dio_await_one(struct dio *dio)
fs/direct-io.c
469
struct bio *bio = NULL;
fs/direct-io.c
489
bio = dio->bio_list;
fs/direct-io.c
490
dio->bio_list = bio->bi_private;
fs/direct-io.c
493
return bio;
fs/direct-io.c
499
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
fs/direct-io.c
501
blk_status_t err = bio->bi_status;
fs/direct-io.c
506
if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
fs/direct-io.c
513
bio_check_pages_dirty(bio); /* transfers ownership */
fs/direct-io.c
515
bio_release_pages(bio, should_dirty);
fs/direct-io.c
516
bio_put(bio);
fs/direct-io.c
530
struct bio *bio;
fs/direct-io.c
532
bio = dio_await_one(dio);
fs/direct-io.c
533
if (bio)
fs/direct-io.c
534
dio_bio_complete(dio, bio);
fs/direct-io.c
535
} while (bio);
fs/direct-io.c
552
struct bio *bio;
fs/direct-io.c
556
bio = dio->bio_list;
fs/direct-io.c
557
dio->bio_list = bio->bi_private;
fs/direct-io.c
559
ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
fs/direct-io.c
692
ret = bio_add_page(sdio->bio, sdio->cur_page,
fs/direct-io.c
70
struct bio *bio; /* bio under assembly */
fs/direct-io.c
725
if (sdio->bio) {
fs/direct-io.c
728
sdio->bio->bi_iter.bi_size;
fs/direct-io.c
749
if (sdio->bio == NULL) {
fs/direct-io.c
835
if (sdio->bio)
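
fs/direct-io.c repurposes bi_private twice: during submission it carries the dio, and on the non-AIO completion path it becomes the next pointer of a singly linked list of finished bios (the bio_list field above). A sketch of that list discipline; the real code serialises it with dio->bio_lock, omitted here for brevity:

#include <linux/bio.h>

static struct bio *completed_list;      /* singly linked via bi_private */

static void sketch_push_completed(struct bio *bio)
{
        bio->bi_private = completed_list;
        completed_list = bio;
}

static struct bio *sketch_pop_completed(void)
{
        struct bio *bio = completed_list;

        if (bio)
                completed_list = bio->bi_private;
        return bio;
}
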
fs/erofs/fileio.c
10
struct bio bio;
fs/erofs/fileio.c
141
io->rq->bio.bi_iter.bi_sector =
fs/erofs/fileio.c
145
if (!bio_add_folio(&io->rq->bio, folio, len, cur))
fs/erofs/fileio.c
28
if (ret >= 0 && ret != rq->bio.bi_iter.bi_size)
fs/erofs/fileio.c
30
if (!rq->bio.bi_end_io) {
fs/erofs/fileio.c
31
bio_for_each_folio_all(fi, &rq->bio) {
fs/erofs/fileio.c
35
} else if (ret < 0 && !rq->bio.bi_status) {
fs/erofs/fileio.c
36
rq->bio.bi_status = errno_to_blk_status(ret);
fs/erofs/fileio.c
38
bio_endio(&rq->bio);
fs/erofs/fileio.c
39
bio_uninit(&rq->bio);
fs/erofs/fileio.c
51
rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/erofs/fileio.c
57
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
fs/erofs/fileio.c
58
rq->bio.bi_iter.bi_size);
fs/erofs/fileio.c
71
bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
fs/erofs/fileio.c
78
struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
fs/erofs/fileio.c
80
return &erofs_fileio_rq_alloc(mdev)->bio;
fs/erofs/fileio.c
83
void erofs_fileio_submit_bio(struct bio *bio)
fs/erofs/fileio.c
85
return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
fs/erofs/fileio.c
86
bio));
fs/erofs/fscache.c
165
struct bio bio; /* w/o bdev to share bio_add_page/endio() */
fs/erofs/fscache.c
174
io->bio.bi_status = errno_to_blk_status(transferred_or_error);
fs/erofs/fscache.c
175
bio_endio(&io->bio);
fs/erofs/fscache.c
180
struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
fs/erofs/fscache.c
185
bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
fs/erofs/fscache.c
189
return &io->bio;
fs/erofs/fscache.c
192
void erofs_fscache_submit_bio(struct bio *bio)
fs/erofs/fscache.c
194
struct erofs_fscache_bio *io = container_of(bio,
fs/erofs/fscache.c
195
struct erofs_fscache_bio, bio);
fs/erofs/fscache.c
198
iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt,
fs/erofs/fscache.c
199
bio->bi_iter.bi_size);
fs/erofs/fscache.c
201
bio->bi_iter.bi_sector << 9, &io->io);
fs/erofs/fscache.c
205
bio->bi_status = errno_to_blk_status(ret);
fs/erofs/fscache.c
206
bio_endio(bio);
fs/erofs/internal.h
543
struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev);
fs/erofs/internal.h
544
void erofs_fileio_submit_bio(struct bio *bio);
fs/erofs/internal.h
546
static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
fs/erofs/internal.h
547
static inline void erofs_fileio_submit_bio(struct bio *bio) {}
fs/erofs/internal.h
557
struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev);
fs/erofs/internal.h
558
void erofs_fscache_submit_bio(struct bio *bio);
fs/erofs/internal.h
576
static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
fs/erofs/internal.h
577
static inline void erofs_fscache_submit_bio(struct bio *bio) {}
fs/erofs/zdata.c
1637
static void z_erofs_endio(struct bio *bio)
fs/erofs/zdata.c
1639
struct z_erofs_decompressqueue *q = bio->bi_private;
fs/erofs/zdata.c
1640
blk_status_t err = bio->bi_status;
fs/erofs/zdata.c
1643
bio_for_each_folio_all(fi, bio) {
fs/erofs/zdata.c
1658
if (bio->bi_bdev)
fs/erofs/zdata.c
1659
bio_put(bio);
fs/erofs/zdata.c
1674
struct bio *bio = NULL;
fs/erofs/zdata.c
1713
if (bio && (cur != last_pa ||
fs/erofs/zdata.c
1714
bio->bi_bdev != mdev.m_bdev)) {
fs/erofs/zdata.c
1717
erofs_fileio_submit_bio(bio);
fs/erofs/zdata.c
1719
erofs_fscache_submit_bio(bio);
fs/erofs/zdata.c
1721
submit_bio(bio);
fs/erofs/zdata.c
1727
bio = NULL;
fs/erofs/zdata.c
1745
if (!bio) {
fs/erofs/zdata.c
1747
bio = erofs_fileio_bio_alloc(&mdev);
fs/erofs/zdata.c
1749
bio = erofs_fscache_bio_alloc(&mdev);
fs/erofs/zdata.c
1751
bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
fs/erofs/zdata.c
1753
bio->bi_end_io = z_erofs_endio;
fs/erofs/zdata.c
1754
bio->bi_iter.bi_sector =
fs/erofs/zdata.c
1756
bio->bi_private = q[JQ_SUBMIT];
fs/erofs/zdata.c
1758
bio->bi_opf |= REQ_RAHEAD;
fs/erofs/zdata.c
1762
if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
fs/erofs/zdata.c
1775
if (bio) {
fs/erofs/zdata.c
1777
erofs_fileio_submit_bio(bio);
fs/erofs/zdata.c
1779
erofs_fscache_submit_bio(bio);
fs/erofs/zdata.c
1781
submit_bio(bio);
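
The fs/erofs entries show one bio fanned out to three backends: file-backed and fscache bios are bdev-less bios embedded in a request struct and recovered with container_of(), while only real block-device bios reach submit_bio() (hence the if (bio->bi_bdev) guard before bio_put() above). A sketch of the bdev-less wrapper; sketch_rq is an illustrative stand-in for erofs_fileio_rq / erofs_fscache_bio:

#include <linux/bio.h>
#include <linux/fs.h>

struct sketch_rq {
        struct kiocb    iocb;
        struct bio      bio;    /* no bdev; reuses bio_add_folio()/bio_endio() */
};

static void sketch_submit_rq(struct bio *bio)
{
        struct sketch_rq *rq = container_of(bio, struct sketch_rq, bio);

        /* issue rq->iocb against the backing file here, then report back: */
        rq->bio.bi_status = BLK_STS_OK;     /* or errno_to_blk_status(ret) */
        bio_endio(&rq->bio);
}
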
fs/ext4/ext4.h
297
struct bio *bio; /* Linked list of completed
fs/ext4/ext4.h
306
struct bio *io_bio;
fs/ext4/page-io.c
101
static void ext4_finish_bio(struct bio *bio)
fs/ext4/page-io.c
105
bio_for_each_folio_all(fi, bio) {
fs/ext4/page-io.c
119
if (bio->bi_status) {
fs/ext4/page-io.c
120
int err = blk_status_to_errno(bio->bi_status);
fs/ext4/page-io.c
137
if (bio->bi_status) {
fs/ext4/page-io.c
152
struct bio *bio, *next_bio;
fs/ext4/page-io.c
158
for (bio = io_end->bio; bio; bio = next_bio) {
fs/ext4/page-io.c
159
next_bio = bio->bi_private;
fs/ext4/page-io.c
160
ext4_finish_bio(bio);
fs/ext4/page-io.c
161
bio_put(bio);
fs/ext4/page-io.c
262
WARN_ON(!io_end->bio);
fs/ext4/page-io.c
350
static void ext4_end_bio(struct bio *bio)
fs/ext4/page-io.c
352
ext4_io_end_t *io_end = bio->bi_private;
fs/ext4/page-io.c
353
sector_t bi_sector = bio->bi_iter.bi_sector;
fs/ext4/page-io.c
356
bio->bi_bdev,
fs/ext4/page-io.c
357
(long long) bio->bi_iter.bi_sector,
fs/ext4/page-io.c
358
(unsigned) bio_sectors(bio),
fs/ext4/page-io.c
359
bio->bi_status)) {
fs/ext4/page-io.c
360
ext4_finish_bio(bio);
fs/ext4/page-io.c
361
bio_put(bio);
fs/ext4/page-io.c
364
bio->bi_end_io = NULL;
fs/ext4/page-io.c
366
if (bio->bi_status) {
fs/ext4/page-io.c
371
bio->bi_status, inode->i_ino,
fs/ext4/page-io.c
376
blk_status_to_errno(bio->bi_status));
fs/ext4/page-io.c
385
bio->bi_private = xchg(&io_end->bio, bio);
fs/ext4/page-io.c
393
ext4_finish_bio(bio);
fs/ext4/page-io.c
394
bio_put(bio);
fs/ext4/page-io.c
400
struct bio *bio = io->io_bio;
fs/ext4/page-io.c
402
if (bio) {
fs/ext4/page-io.c
421
struct bio *bio;
fs/ext4/page-io.c
427
bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
fs/ext4/page-io.c
428
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
fs/ext4/page-io.c
429
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
fs/ext4/page-io.c
430
bio->bi_end_io = ext4_end_bio;
fs/ext4/page-io.c
431
bio->bi_private = ext4_get_io_end(io->io_end);
fs/ext4/page-io.c
432
io->io_bio = bio;
fs/ext4/page-io.c
434
wbc_init_bio(io->io_wbc, bio);
fs/ext4/readpage.c
101
struct bio *bio = ctx->bio;
fs/ext4/readpage.c
113
bio->bi_private = NULL;
fs/ext4/readpage.c
115
fsverity_verify_bio(vi, bio);
fs/ext4/readpage.c
117
__read_end_io(bio);
fs/ext4/readpage.c
146
__read_end_io(ctx->bio);
fs/ext4/readpage.c
150
static bool bio_post_read_required(struct bio *bio)
fs/ext4/readpage.c
152
return bio->bi_private && !bio->bi_status;
fs/ext4/readpage.c
167
static void mpage_end_io(struct bio *bio)
fs/ext4/readpage.c
169
if (bio_post_read_required(bio)) {
fs/ext4/readpage.c
170
struct bio_post_read_ctx *ctx = bio->bi_private;
fs/ext4/readpage.c
176
__read_end_io(bio);
fs/ext4/readpage.c
179
static void ext4_set_bio_post_read_ctx(struct bio *bio,
fs/ext4/readpage.c
196
ctx->bio = bio;
fs/ext4/readpage.c
199
bio->bi_private = ctx;
fs/ext4/readpage.c
214
struct bio *bio = NULL;
fs/ext4/readpage.c
344
if (bio && (last_block_in_bio != first_block - 1 ||
fs/ext4/readpage.c
345
!fscrypt_mergeable_bio(bio, inode, next_block))) {
fs/ext4/readpage.c
347
blk_crypto_submit_bio(bio);
fs/ext4/readpage.c
348
bio = NULL;
fs/ext4/readpage.c
350
if (bio == NULL) {
fs/ext4/readpage.c
355
bio = bio_alloc(bdev, bio_max_segs(nr_pages),
fs/ext4/readpage.c
357
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
fs/ext4/readpage.c
359
ext4_set_bio_post_read_ctx(bio, inode, vi);
fs/ext4/readpage.c
360
bio->bi_iter.bi_sector = first_block << (blkbits - 9);
fs/ext4/readpage.c
361
bio->bi_end_io = mpage_end_io;
fs/ext4/readpage.c
363
bio->bi_opf |= REQ_RAHEAD;
fs/ext4/readpage.c
367
if (!bio_add_folio(bio, folio, length, 0))
fs/ext4/readpage.c
373
blk_crypto_submit_bio(bio);
fs/ext4/readpage.c
374
bio = NULL;
fs/ext4/readpage.c
379
if (bio) {
fs/ext4/readpage.c
380
blk_crypto_submit_bio(bio);
fs/ext4/readpage.c
381
bio = NULL;
fs/ext4/readpage.c
390
if (bio)
fs/ext4/readpage.c
391
blk_crypto_submit_bio(bio);
fs/ext4/readpage.c
65
struct bio *bio;
fs/ext4/readpage.c
72
static void __read_end_io(struct bio *bio)
fs/ext4/readpage.c
76
bio_for_each_folio_all(fi, bio)
fs/ext4/readpage.c
77
folio_end_read(fi.folio, bio->bi_status == 0);
fs/ext4/readpage.c
78
if (bio->bi_private)
fs/ext4/readpage.c
79
mempool_free(bio->bi_private, bio_post_read_ctx_pool);
fs/ext4/readpage.c
80
bio_put(bio);
fs/ext4/readpage.c
89
struct bio *bio = ctx->bio;
fs/ext4/readpage.c
91
if (fscrypt_decrypt_bio(bio))
fs/ext4/readpage.c
94
__read_end_io(bio);
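
fs/ext4/readpage.c (and the fs/f2fs read path that follows) hang a post-read context off bi_private: the end_io defers decryption/verification to a workqueue when a context is present, and only the final step ends the folios. A compressed sketch; the struct and helper names are illustrative:

#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

struct sketch_post_read_ctx {
        struct bio              *bio;
        struct work_struct      work;   /* runs decrypt/verify, then ends IO */
};

static void sketch_read_end_io(struct bio *bio)
{
        struct sketch_post_read_ctx *ctx = bio->bi_private;
        struct folio_iter fi;

        if (ctx && !bio->bi_status) {
                queue_work(system_wq, &ctx->work);      /* finish off-irq */
                return;
        }
        bio_for_each_folio_all(fi, bio)
                folio_end_read(fi.folio, bio->bi_status == 0);
        bio_put(bio);
}
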
fs/f2fs/compress.c
1122
struct bio *bio = NULL;
fs/f2fs/compress.c
1124
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
fs/f2fs/compress.c
1130
if (bio)
fs/f2fs/compress.c
1131
f2fs_submit_read_bio(sbi, bio, DATA);
fs/f2fs/compress.c
1480
void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio)
fs/f2fs/compress.c
1483
struct f2fs_sb_info *sbi = bio->bi_private;
fs/f2fs/compress.c
1489
if (unlikely(bio->bi_status != BLK_STS_OK))
fs/f2fs/data.c
1052
if (io->bio &&
fs/f2fs/data.c
1053
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fs/f2fs/data.c
1055
!f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio),
fs/f2fs/data.c
1059
if (io->bio == NULL) {
fs/f2fs/data.c
1060
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
fs/f2fs/data.c
1061
f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio),
fs/f2fs/data.c
1066
if (!bio_add_folio(io->bio, bio_folio, folio_size(bio_folio), 0)) {
fs/f2fs/data.c
1081
bio_get(io->bio);
fs/f2fs/data.c
1083
io->bi_private = io->bio->bi_private;
fs/f2fs/data.c
1084
io->bio->bi_private = io;
fs/f2fs/data.c
1085
io->bio->bi_end_io = f2fs_zone_write_end_io;
fs/f2fs/data.c
1086
io->zone_pending_bio = io->bio;
fs/f2fs/data.c
1099
static struct bio *f2fs_grab_read_bio(struct inode *inode,
fs/f2fs/data.c
1105
struct bio *bio;
fs/f2fs/data.c
1111
bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
fs/f2fs/data.c
1114
bio->bi_iter.bi_sector = sector;
fs/f2fs/data.c
1115
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
fs/f2fs/data.c
1116
bio->bi_end_io = f2fs_read_end_io;
fs/f2fs/data.c
1134
ctx->bio = bio;
fs/f2fs/data.c
1140
bio->bi_private = ctx;
fs/f2fs/data.c
1142
iostat_alloc_and_bind_ctx(sbi, bio, ctx);
fs/f2fs/data.c
1144
return bio;
fs/f2fs/data.c
1153
struct bio *bio;
fs/f2fs/data.c
1155
bio = f2fs_grab_read_bio(inode, vi, blkaddr, 1, op_flags, folio->index,
fs/f2fs/data.c
116
struct bio *bio;
fs/f2fs/data.c
1161
if (!bio_add_folio(bio, folio, PAGE_SIZE, 0))
fs/f2fs/data.c
1166
f2fs_submit_read_bio(sbi, bio, DATA);
fs/f2fs/data.c
144
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
fs/f2fs/data.c
147
struct bio_post_read_ctx *ctx = bio->bi_private;
fs/f2fs/data.c
150
bio_for_each_folio_all(fi, bio) {
fs/f2fs/data.c
179
bio->bi_status = BLK_STS_IOERR;
fs/f2fs/data.c
182
folio_end_read(folio, bio->bi_status == BLK_STS_OK);
fs/f2fs/data.c
187
bio_put(bio);
fs/f2fs/data.c
194
struct bio *bio = ctx->bio;
fs/f2fs/data.c
206
bio->bi_private = NULL;
fs/f2fs/data.c
215
bio_for_each_folio_all(fi, bio) {
fs/f2fs/data.c
2161
struct bio **bio_ret,
fs/f2fs/data.c
2165
struct bio *bio = *bio_ret;
fs/f2fs/data.c
220
bio->bi_status = BLK_STS_IOERR;
fs/f2fs/data.c
2232
if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
fs/f2fs/data.c
2234
!f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
fs/f2fs/data.c
2236
f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
fs/f2fs/data.c
2237
bio = NULL;
fs/f2fs/data.c
2239
if (bio == NULL)
fs/f2fs/data.c
2240
bio = f2fs_grab_read_bio(inode, vi, block_nr, nr_pages,
fs/f2fs/data.c
2249
if (!bio_add_folio(bio, folio, blocksize, 0))
fs/f2fs/data.c
225
fsverity_verify_bio(vi, bio);
fs/f2fs/data.c
2257
*bio_ret = bio;
fs/f2fs/data.c
2262
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
fs/f2fs/data.c
2269
struct bio *bio = *bio_ret;
fs/f2fs/data.c
228
f2fs_finish_read_bio(bio, true);
fs/f2fs/data.c
2382
if (bio && (!page_is_mergeable(sbi, bio,
fs/f2fs/data.c
2384
!f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
fs/f2fs/data.c
2386
f2fs_submit_read_bio(sbi, bio, DATA);
fs/f2fs/data.c
2387
bio = NULL;
fs/f2fs/data.c
2390
if (!bio)
fs/f2fs/data.c
2391
bio = f2fs_grab_read_bio(inode, cc->vi, blkaddr,
fs/f2fs/data.c
2396
if (!bio_add_folio(bio, folio, blocksize, 0))
fs/f2fs/data.c
2399
ctx = get_post_read_ctx(bio);
fs/f2fs/data.c
240
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
fs/f2fs/data.c
2411
*bio_ret = bio;
fs/f2fs/data.c
242
struct bio_post_read_ctx *ctx = bio->bi_private;
fs/f2fs/data.c
2424
*bio_ret = bio;
fs/f2fs/data.c
2465
struct bio *bio = NULL;
fs/f2fs/data.c
248
f2fs_finish_read_bio(bio, in_task);
fs/f2fs/data.c
2557
if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
fs/f2fs/data.c
2559
!f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
fs/f2fs/data.c
2561
f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
fs/f2fs/data.c
2562
bio = NULL;
fs/f2fs/data.c
2564
if (bio == NULL)
fs/f2fs/data.c
2565
bio = f2fs_grab_read_bio(inode, vi,
fs/f2fs/data.c
2576
if (!bio_add_folio(bio, folio, F2FS_BLKSIZE,
fs/f2fs/data.c
2598
f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
fs/f2fs/data.c
2615
struct bio *bio = NULL;
fs/f2fs/data.c
2673
ret = f2fs_read_multi_pages(&cc, &bio,
fs/f2fs/data.c
268
bio_for_each_folio_all(fi, ctx->bio) {
fs/f2fs/data.c
2707
&map, &bio, &last_block_in_bio,
fs/f2fs/data.c
2725
ret = f2fs_read_multi_pages(&cc, &bio,
fs/f2fs/data.c
2734
f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
fs/f2fs/data.c
295
struct bio *bio = ctx->bio;
fs/f2fs/data.c
297
if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
fs/f2fs/data.c
298
f2fs_finish_read_bio(bio, true);
fs/f2fs/data.c
305
f2fs_verify_and_finish_bio(bio, true);
fs/f2fs/data.c
3054
struct bio **bio,
fs/f2fs/data.c
308
static void f2fs_read_end_io(struct bio *bio)
fs/f2fs/data.c
3086
.bio = bio,
fs/f2fs/data.c
310
struct f2fs_sb_info *sbi = F2FS_F_SB(bio_first_folio_all(bio));
fs/f2fs/data.c
314
iostat_update_and_unbind_ctx(bio);
fs/f2fs/data.c
315
ctx = bio->bi_private;
fs/f2fs/data.c
318
bio->bi_status = BLK_STS_IOERR;
fs/f2fs/data.c
3191
if (bio && *bio)
fs/f2fs/data.c
3192
f2fs_submit_merged_ipu_write(sbi, bio, NULL);
fs/f2fs/data.c
320
if (bio->bi_status != BLK_STS_OK) {
fs/f2fs/data.c
321
f2fs_finish_read_bio(bio, intask);
fs/f2fs/data.c
3230
struct bio *bio = NULL;
fs/f2fs/data.c
3420
&submitted, &bio, &last_block,
fs/f2fs/data.c
343
f2fs_verify_and_finish_bio(bio, intask);
fs/f2fs/data.c
346
static void f2fs_write_end_io(struct bio *bio)
fs/f2fs/data.c
3490
if (bio)
fs/f2fs/data.c
3491
f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
fs/f2fs/data.c
351
iostat_update_and_unbind_ctx(bio);
fs/f2fs/data.c
352
sbi = bio->bi_private;
fs/f2fs/data.c
355
bio->bi_status = BLK_STS_IOERR;
fs/f2fs/data.c
357
bio_for_each_folio_all(fi, bio) {
fs/f2fs/data.c
370
f2fs_compress_write_end_io(bio, folio);
fs/f2fs/data.c
377
if (unlikely(bio->bi_status != BLK_STS_OK)) {
fs/f2fs/data.c
406
bio_put(bio);
fs/f2fs/data.c
410
static void f2fs_zone_write_end_io(struct bio *bio)
fs/f2fs/data.c
412
struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
fs/f2fs/data.c
414
bio->bi_private = io->bi_private;
fs/f2fs/data.c
416
f2fs_write_end_io(bio);
fs/f2fs/data.c
491
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
fs/f2fs/data.c
496
struct bio *bio;
fs/f2fs/data.c
499
bio = bio_alloc_bioset(bdev, npages,
fs/f2fs/data.c
502
bio->bi_iter.bi_sector = sector;
fs/f2fs/data.c
504
bio->bi_end_io = f2fs_read_end_io;
fs/f2fs/data.c
505
bio->bi_private = NULL;
fs/f2fs/data.c
507
bio->bi_end_io = f2fs_write_end_io;
fs/f2fs/data.c
508
bio->bi_private = sbi;
fs/f2fs/data.c
509
bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
fs/f2fs/data.c
512
iostat_alloc_and_bind_ctx(sbi, bio, NULL);
fs/f2fs/data.c
515
wbc_init_bio(fio->io_wbc, bio);
fs/f2fs/data.c
517
return bio;
fs/f2fs/data.c
520
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
fs/f2fs/data.c
530
fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
fs/f2fs/data.c
533
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
fs/f2fs/data.c
542
return !bio_has_crypt_ctx(bio);
fs/f2fs/data.c
544
return fscrypt_mergeable_bio(bio, inode, next_idx);
fs/f2fs/data.c
547
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
fs/f2fs/data.c
550
if (!bio)
fs/f2fs/data.c
553
WARN_ON_ONCE(!is_read_io(bio_op(bio)));
fs/f2fs/data.c
554
trace_f2fs_submit_read_bio(sbi->sb, type, bio);
fs/f2fs/data.c
556
iostat_update_submit_ctx(bio, type);
fs/f2fs/data.c
557
blk_crypto_submit_bio(bio);
fs/f2fs/data.c
560
static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
fs/f2fs/data.c
563
WARN_ON_ONCE(is_read_io(bio_op(bio)));
fs/f2fs/data.c
564
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
fs/f2fs/data.c
565
iostat_update_submit_ctx(bio, type);
fs/f2fs/data.c
566
blk_crypto_submit_bio(bio);
fs/f2fs/data.c
573
if (!io->bio)
fs/f2fs/data.c
577
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
fs/f2fs/data.c
578
f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
fs/f2fs/data.c
580
trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
fs/f2fs/data.c
581
f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
fs/f2fs/data.c
583
io->bio = NULL;
fs/f2fs/data.c
586
static bool __has_merged_page(struct bio *bio, struct inode *inode,
fs/f2fs/data.c
591
if (!bio)
fs/f2fs/data.c
597
bio_for_each_folio_all(fi, bio) {
fs/f2fs/data.c
642
io->bio = NULL;
fs/f2fs/data.c
668
if (!io->bio)
fs/f2fs/data.c
674
io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
fs/f2fs/data.c
676
io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
fs/f2fs/data.c
698
ret = __has_merged_page(io->bio, inode, folio, ino);
fs/f2fs/data.c
749
struct bio *bio;
fs/f2fs/data.c
762
bio = __bio_alloc(fio, 1);
fs/f2fs/data.c
764
f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
fs/f2fs/data.c
766
bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);
fs/f2fs/data.c
774
if (is_read_io(bio_op(bio)))
fs/f2fs/data.c
775
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
fs/f2fs/data.c
777
f2fs_submit_write_bio(fio->sbi, bio, fio->type);
fs/f2fs/data.c
781
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
fs/f2fs/data.c
785
bio->bi_iter.bi_size >= sbi->max_io_bytes))
fs/f2fs/data.c
789
return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
fs/f2fs/data.c
802
static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
fs/f2fs/data.c
808
if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
fs/f2fs/data.c
813
static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
fs/f2fs/data.c
820
be->bio = bio;
fs/f2fs/data.c
821
bio_get(bio);
fs/f2fs/data.c
823
bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
fs/f2fs/data.c
836
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
fs/f2fs/data.c
852
if (be->bio != *bio)
fs/f2fs/data.c
857
f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
fs/f2fs/data.c
860
if (f2fs_crypt_mergeable_bio(*bio,
fs/f2fs/data.c
863
bio_add_folio(*bio, folio, folio_size(folio), 0)) {
fs/f2fs/data.c
870
f2fs_submit_write_bio(sbi, *bio, DATA);
fs/f2fs/data.c
877
bio_put(*bio);
fs/f2fs/data.c
878
*bio = NULL;
fs/f2fs/data.c
885
struct bio **bio, struct folio *folio)
fs/f2fs/data.c
889
struct bio *target = bio ? *bio : NULL;
fs/f2fs/data.c
904
found = (target == be->bio);
fs/f2fs/data.c
906
found = __has_merged_page(be->bio, NULL,
fs/f2fs/data.c
921
found = (target == be->bio);
fs/f2fs/data.c
923
found = __has_merged_page(be->bio, NULL,
fs/f2fs/data.c
926
target = be->bio;
fs/f2fs/data.c
936
if (bio && *bio) {
fs/f2fs/data.c
937
bio_put(*bio);
fs/f2fs/data.c
938
*bio = NULL;
fs/f2fs/data.c
944
struct bio *bio = *fio->bio;
fs/f2fs/data.c
955
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fs/f2fs/data.c
957
f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
fs/f2fs/data.c
959
if (!bio) {
fs/f2fs/data.c
960
bio = __bio_alloc(fio, BIO_MAX_VECS);
fs/f2fs/data.c
961
f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
fs/f2fs/data.c
964
add_bio_entry(fio->sbi, bio, data_folio, fio->temp);
fs/f2fs/data.c
966
if (add_ipu_page(fio, &bio, data_folio))
fs/f2fs/data.c
976
*fio->bio = bio;
fs/f2fs/f2fs.h
1363
struct bio **bio; /* bio for ipu */
fs/f2fs/f2fs.h
1368
struct bio *bio;
fs/f2fs/f2fs.h
1375
struct bio *bio; /* bios to merge */
fs/f2fs/f2fs.h
1380
struct bio *zone_pending_bio; /* pending bio for the previous zone */
fs/f2fs/f2fs.h
4140
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
fs/f2fs/f2fs.h
4150
struct bio **bio, struct folio *folio);
fs/f2fs/f2fs.h
4180
struct bio **bio, sector_t *last_block,
fs/f2fs/f2fs.h
4629
void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio);
fs/f2fs/f2fs.h
4652
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
fs/f2fs/file.c
5059
struct bio *bio, loff_t file_offset)
fs/f2fs/file.c
5066
bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp);
fs/f2fs/file.c
5067
blk_crypto_submit_bio(bio);
fs/f2fs/iostat.c
240
void iostat_update_and_unbind_ctx(struct bio *bio)
fs/f2fs/iostat.c
242
struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
fs/f2fs/iostat.c
245
if (op_is_write(bio_op(bio))) {
fs/f2fs/iostat.c
246
lat_type = bio->bi_opf & REQ_SYNC ?
fs/f2fs/iostat.c
248
bio->bi_private = iostat_ctx->sbi;
fs/f2fs/iostat.c
251
bio->bi_private = iostat_ctx->post_read_ctx;
fs/f2fs/iostat.c
259
struct bio *bio, struct bio_post_read_ctx *ctx)
fs/f2fs/iostat.c
268
bio->bi_private = iostat_ctx;
fs/f2fs/iostat.h
45
static inline void iostat_update_submit_ctx(struct bio *bio,
fs/f2fs/iostat.h
48
struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
fs/f2fs/iostat.h
54
static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)
fs/f2fs/iostat.h
56
struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
fs/f2fs/iostat.h
61
extern void iostat_update_and_unbind_ctx(struct bio *bio);
fs/f2fs/iostat.h
63
struct bio *bio, struct bio_post_read_ctx *ctx);
fs/f2fs/iostat.h
71
static inline void iostat_update_and_unbind_ctx(struct bio *bio) {}
fs/f2fs/iostat.h
73
struct bio *bio, struct bio_post_read_ctx *ctx) {}
fs/f2fs/iostat.h
74
static inline void iostat_update_submit_ctx(struct bio *bio,
fs/f2fs/iostat.h
76
static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)
fs/f2fs/iostat.h
78
return bio->bi_private;
fs/f2fs/segment.c
1145
static void f2fs_submit_discard_endio(struct bio *bio)
fs/f2fs/segment.c
1147
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
fs/f2fs/segment.c
1152
dc->error = blk_status_to_errno(bio->bi_status);
fs/f2fs/segment.c
1159
bio_put(bio);
fs/f2fs/segment.c
1247
struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
fs/f2fs/segment.c
1267
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
fs/f2fs/segment.c
1268
bio->bi_private = dc;
fs/f2fs/segment.c
1269
bio->bi_end_io = f2fs_submit_discard_endio;
fs/f2fs/segment.c
1270
submit_bio(bio);
fs/f2fs/segment.c
1331
struct bio *bio = NULL;
fs/f2fs/segment.c
1347
SECTOR_FROM_BLOCK(len), GFP_NOFS, &bio);
fs/f2fs/segment.c
1348
f2fs_bug_on(sbi, !bio);
fs/f2fs/segment.c
1369
bio->bi_private = dc;
fs/f2fs/segment.c
1370
bio->bi_end_io = f2fs_submit_discard_endio;
fs/f2fs/segment.c
1371
bio->bi_opf |= flag;
fs/f2fs/segment.c
1372
submit_bio(bio);
fs/f2fs/segment.c
4088
if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
fs/f2fs/segment.c
4101
if (fio->bio && *(fio->bio)) {
fs/f2fs/segment.c
4102
struct bio *bio = *(fio->bio);
fs/f2fs/segment.c
4104
bio->bi_status = BLK_STS_IOERR;
fs/f2fs/segment.c
4105
bio_endio(bio);
fs/f2fs/segment.c
4106
*(fio->bio) = NULL;
fs/f2fs/super.c
3842
struct bio *bio;
fs/f2fs/super.c
3857
bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
fs/f2fs/super.c
3860
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index);
fs/f2fs/super.c
3862
if (!bio_add_folio(bio, folio, folio_size(folio), 0))
fs/f2fs/super.c
3865
ret = submit_bio_wait(bio);
fs/f2fs/super.c
3866
bio_put(bio);
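
A recurring test throughout the fs/f2fs (and fs/ext4) hits is whether a new block may join the open bio: it must be physically contiguous and fscrypt must agree the inline-crypto contexts are compatible. Distilled into one illustrative helper:

#include <linux/bio.h>
#include <linux/fscrypt.h>

static bool sketch_can_merge(struct bio *bio, const struct inode *inode,
                             sector_t next_sector, u64 next_lblk)
{
        return bio_end_sector(bio) == next_sector &&
               fscrypt_mergeable_bio(bio, inode, next_lblk);
}
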
fs/gfs2/incore.h
516
struct bio *jd_log_bio;
fs/gfs2/lops.c
200
static void gfs2_end_log_write(struct bio *bio)
fs/gfs2/lops.c
202
struct gfs2_sbd *sdp = bio->bi_private;
fs/gfs2/lops.c
206
if (bio->bi_status) {
fs/gfs2/lops.c
207
int err = blk_status_to_errno(bio->bi_status);
fs/gfs2/lops.c
215
bio_for_each_segment_all(bvec, bio, iter_all) {
fs/gfs2/lops.c
221
bvec->bv_len, bio->bi_status);
fs/gfs2/lops.c
226
bio_put(bio);
fs/gfs2/lops.c
239
void gfs2_log_submit_write(struct bio **biop)
fs/gfs2/lops.c
241
struct bio *bio = *biop;
fs/gfs2/lops.c
242
if (bio) {
fs/gfs2/lops.c
243
struct gfs2_sbd *sdp = bio->bi_private;
fs/gfs2/lops.c
245
submit_bio(bio);
fs/gfs2/lops.c
262
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
fs/gfs2/lops.c
266
struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, opf, GFP_NOIO);
fs/gfs2/lops.c
268
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
fs/gfs2/lops.c
269
bio->bi_end_io = end_io;
fs/gfs2/lops.c
270
bio->bi_private = sdp;
fs/gfs2/lops.c
272
return bio;
fs/gfs2/lops.c
292
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
fs/gfs2/lops.c
293
struct bio **biop, blk_opf_t opf,
fs/gfs2/lops.c
296
struct bio *bio = *biop;
fs/gfs2/lops.c
298
if (bio) {
fs/gfs2/lops.c
301
nblk = bio_end_sector(bio);
fs/gfs2/lops.c
304
return bio;
fs/gfs2/lops.c
331
struct bio *bio;
fs/gfs2/lops.c
334
bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, opf,
fs/gfs2/lops.c
336
ret = bio_add_page(bio, page, size, offset);
fs/gfs2/lops.c
338
bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
fs/gfs2/lops.c
340
ret = bio_add_page(bio, page, size, offset);
fs/gfs2/lops.c
394
static void gfs2_end_log_read(struct bio *bio)
fs/gfs2/lops.c
396
int error = blk_status_to_errno(bio->bi_status);
fs/gfs2/lops.c
399
bio_for_each_folio_all(fi, bio) {
fs/gfs2/lops.c
405
bio_put(bio);
fs/gfs2/lops.c
481
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs,
fs/gfs2/lops.c
484
struct bio *new;
fs/gfs2/lops.c
515
struct bio *bio = NULL;
fs/gfs2/lops.c
540
if (bio && (off || block < blocks_submitted + max_blocks)) {
fs/gfs2/lops.c
543
if (bio_end_sector(bio) == sector) {
fs/gfs2/lops.c
544
if (bio_add_folio(bio, folio, bsize, off))
fs/gfs2/lops.c
551
bio = gfs2_chain_bio(bio, blocks, sector,
fs/gfs2/lops.c
557
if (bio) {
fs/gfs2/lops.c
559
submit_bio(bio);
fs/gfs2/lops.c
562
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read,
fs/gfs2/lops.c
565
bio_add_folio_nofail(bio, folio, bsize, off);
fs/gfs2/lops.c
583
if (bio)
fs/gfs2/lops.c
584
submit_bio(bio);
fs/gfs2/lops.h
20
void gfs2_log_submit_write(struct bio **biop);
fs/gfs2/meta_io.c
201
static void gfs2_meta_read_endio(struct bio *bio)
fs/gfs2/meta_io.c
205
bio_for_each_folio_all(fi, bio) {
fs/gfs2/meta_io.c
215
bh->b_end_io(bh, !bio->bi_status);
fs/gfs2/meta_io.c
219
bio_put(bio);
fs/gfs2/meta_io.c
230
struct bio *bio;
fs/gfs2/meta_io.c
232
bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
fs/gfs2/meta_io.c
233
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> SECTOR_SHIFT);
fs/gfs2/meta_io.c
236
if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) {
fs/gfs2/meta_io.c
237
BUG_ON(bio->bi_iter.bi_size == 0);
fs/gfs2/meta_io.c
243
bio->bi_end_io = gfs2_meta_read_endio;
fs/gfs2/meta_io.c
244
submit_bio(bio);
fs/iomap/bio.c
101
bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
fs/iomap/bio.c
108
if (!bio)
fs/iomap/bio.c
109
bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
fs/iomap/bio.c
111
bio->bi_opf |= REQ_RAHEAD;
fs/iomap/bio.c
112
bio->bi_iter.bi_sector = sector;
fs/iomap/bio.c
113
bio->bi_end_io = iomap_read_end_io;
fs/iomap/bio.c
114
bio_add_folio_nofail(bio, folio, plen, poff);
fs/iomap/bio.c
115
ctx->read_ctx = bio;
fs/iomap/bio.c
131
struct bio bio;
fs/iomap/bio.c
133
bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
fs/iomap/bio.c
134
bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
fs/iomap/bio.c
135
bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
fs/iomap/bio.c
136
return submit_bio_wait(&bio);
fs/iomap/bio.c
14
static void __iomap_read_end_io(struct bio *bio)
fs/iomap/bio.c
16
int error = blk_status_to_errno(bio->bi_status);
fs/iomap/bio.c
19
bio_for_each_folio_all(fi, bio)
fs/iomap/bio.c
21
bio_put(bio);
fs/iomap/bio.c
28
struct bio *bio;
fs/iomap/bio.c
36
while ((bio = bio_list_pop(&tmp)) != NULL) {
fs/iomap/bio.c
37
__iomap_read_end_io(bio);
fs/iomap/bio.c
44
static void iomap_fail_buffered_read(struct bio *bio)
fs/iomap/bio.c
56
bio_list_add(&failed_read_list, bio);
fs/iomap/bio.c
60
static void iomap_read_end_io(struct bio *bio)
fs/iomap/bio.c
62
if (bio->bi_status) {
fs/iomap/bio.c
63
iomap_fail_buffered_read(bio);
fs/iomap/bio.c
67
__iomap_read_end_io(bio);
fs/iomap/bio.c
72
struct bio *bio = ctx->read_ctx;
fs/iomap/bio.c
74
if (bio)
fs/iomap/bio.c
75
submit_bio(bio);
fs/iomap/bio.c
87
struct bio *bio = ctx->read_ctx;
fs/iomap/bio.c
90
if (!bio || bio_end_sector(bio) != sector ||
fs/iomap/bio.c
91
!bio_add_folio(bio, folio, plen, poff)) {
fs/iomap/bio.c
96
if (bio)
fs/iomap/bio.c
97
submit_bio(bio);
fs/iomap/direct-io.c
239
static void __iomap_dio_bio_end_io(struct bio *bio, bool inline_completion)
fs/iomap/direct-io.c
241
struct iomap_dio *dio = bio->bi_private;
fs/iomap/direct-io.c
244
bio_iov_iter_unbounce(bio, !!dio->error,
fs/iomap/direct-io.c
246
bio_put(bio);
fs/iomap/direct-io.c
248
bio_check_pages_dirty(bio);
fs/iomap/direct-io.c
250
bio_release_pages(bio, false);
fs/iomap/direct-io.c
251
bio_put(bio);
fs/iomap/direct-io.c
267
void iomap_dio_bio_end_io(struct bio *bio)
fs/iomap/direct-io.c
269
struct iomap_dio *dio = bio->bi_private;
fs/iomap/direct-io.c
271
if (bio->bi_status)
fs/iomap/direct-io.c
272
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
fs/iomap/direct-io.c
273
__iomap_dio_bio_end_io(bio, false);
fs/iomap/direct-io.c
298
struct bio *bio;
fs/iomap/direct-io.c
312
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
fs/iomap/direct-io.c
314
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
fs/iomap/direct-io.c
316
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
fs/iomap/direct-io.c
317
bio->bi_private = dio;
fs/iomap/direct-io.c
318
bio->bi_end_io = iomap_dio_bio_end_io;
fs/iomap/direct-io.c
323
bio_add_folio_nofail(bio, zero_folio, io_len, 0);
fs/iomap/direct-io.c
326
iomap_dio_submit_bio(iter, dio, bio, pos);
fs/iomap/direct-io.c
336
struct bio *bio;
fs/iomap/direct-io.c
344
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
fs/iomap/direct-io.c
345
fscrypt_set_bio_crypt_ctx(bio, iter->inode,
fs/iomap/direct-io.c
347
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
fs/iomap/direct-io.c
348
bio->bi_write_hint = iter->inode->i_write_hint;
fs/iomap/direct-io.c
349
bio->bi_ioprio = dio->iocb->ki_ioprio;
fs/iomap/direct-io.c
350
bio->bi_private = dio;
fs/iomap/direct-io.c
351
bio->bi_end_io = iomap_dio_bio_end_io;
fs/iomap/direct-io.c
354
ret = bio_iov_iter_bounce(bio, dio->submit.iter);
fs/iomap/direct-io.c
356
ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
fs/iomap/direct-io.c
360
ret = bio->bi_iter.bi_size;
fs/iomap/direct-io.c
375
bio_set_pages_dirty(bio);
fs/iomap/direct-io.c
382
iomap_dio_submit_bio(iter, dio, bio, pos);
fs/iomap/direct-io.c
386
bio_put(bio);
fs/iomap/direct-io.c
53
static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
fs/iomap/direct-io.c
63
struct iomap_dio *dio, struct bio *bio, loff_t pos)
fs/iomap/direct-io.c
71
bio_set_polled(bio, iocb);
fs/iomap/direct-io.c
72
WRITE_ONCE(iocb->private, bio);
fs/iomap/direct-io.c
76
dio->dops->submit_io(iter, bio, pos);
fs/iomap/direct-io.c
79
blk_crypto_submit_bio(bio);
fs/iomap/ioend.c
113
static void ioend_writeback_end_bio(struct bio *bio)
fs/iomap/ioend.c
115
struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
fs/iomap/ioend.c
117
ioend->io_error = blk_status_to_errno(bio->bi_status);
fs/iomap/ioend.c
155
struct bio *bio;
fs/iomap/ioend.c
157
bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
fs/iomap/ioend.c
160
bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
fs/iomap/ioend.c
161
bio->bi_write_hint = wpc->inode->i_write_hint;
fs/iomap/ioend.c
162
wbc_init_bio(wpc->wbc, bio);
fs/iomap/ioend.c
164
return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
fs/iomap/ioend.c
17
struct bio *bio, loff_t file_offset, u16 ioend_flags)
fs/iomap/ioend.c
19
struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
fs/iomap/ioend.c
28
ioend->io_size = bio->bi_iter.bi_size;
fs/iomap/ioend.c
29
ioend->io_sector = bio->bi_iter.bi_sector;
fs/iomap/ioend.c
303
struct bio *bio = &ioend->io_bio;
fs/iomap/ioend.c
306
bio_put(bio);
fs/iomap/ioend.c
43
struct bio *bio = &ioend->io_bio;
fs/iomap/ioend.c
439
struct bio *bio = &ioend->io_bio;
fs/iomap/ioend.c
443
struct bio *split;
fs/iomap/ioend.c
446
struct queue_limits *lim = bdev_limits(bio->bi_bdev);
fs/iomap/ioend.c
451
sector_offset = bio_split_rw_at(bio, lim, &nr_segs, max_len);
fs/iomap/ioend.c
457
if (bio->bi_iter.bi_size <= max_len)
fs/iomap/ioend.c
466
split = bio_split(bio, sector_offset, GFP_NOFS, &iomap_ioend_bioset);
fs/iomap/ioend.c
469
split->bi_private = bio->bi_private;
fs/iomap/ioend.c
470
split->bi_end_io = bio->bi_end_io;
fs/iomap/ioend.c
49
if (!bio_flagged(bio, BIO_QUIET)) {
fs/iomap/ioend.c
58
bio_for_each_folio_all(fi, bio) {
fs/iomap/ioend.c
68
bio_put(bio); /* frees the ioend */
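
Two iomap idioms stand out above: the ioend embeds its bio (io_bio), so the final bio_put() frees the whole ioend, and short synchronous reads use a fully on-stack bio (fs/iomap/bio.c:131). The on-stack variant, sketched:

#include <linux/bio.h>

static int sketch_sync_read(struct block_device *bdev, sector_t sector,
                            struct folio *folio, size_t len, size_t off)
{
        struct bio_vec bvec;
        struct bio bio;
        int ret;

        bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = sector;
        bio_add_folio_nofail(&bio, folio, len, off);
        ret = submit_bio_wait(&bio);    /* no bi_end_io needed */
        bio_uninit(&bio);
        return ret;
}
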
fs/jfs/jfs_logmgr.c
1963
struct bio *bio;
fs/jfs/jfs_logmgr.c
1974
bio = bio_alloc(file_bdev(log->bdev_file), 1, REQ_OP_READ, GFP_NOFS);
fs/jfs/jfs_logmgr.c
1975
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
fs/jfs/jfs_logmgr.c
1976
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
fs/jfs/jfs_logmgr.c
1977
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
fs/jfs/jfs_logmgr.c
1979
bio->bi_end_io = lbmIODone;
fs/jfs/jfs_logmgr.c
1980
bio->bi_private = bp;
fs/jfs/jfs_logmgr.c
1983
bio->bi_iter.bi_size = 0;
fs/jfs/jfs_logmgr.c
1984
lbmIODone(bio);
fs/jfs/jfs_logmgr.c
1986
submit_bio(bio);
fs/jfs/jfs_logmgr.c
2110
struct bio *bio;
fs/jfs/jfs_logmgr.c
2119
bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
fs/jfs/jfs_logmgr.c
2121
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
fs/jfs/jfs_logmgr.c
2122
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
fs/jfs/jfs_logmgr.c
2123
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
fs/jfs/jfs_logmgr.c
2125
bio->bi_end_io = lbmIODone;
fs/jfs/jfs_logmgr.c
2126
bio->bi_private = bp;
fs/jfs/jfs_logmgr.c
2130
bio->bi_iter.bi_size = 0;
fs/jfs/jfs_logmgr.c
2131
lbmIODone(bio);
fs/jfs/jfs_logmgr.c
2133
submit_bio(bio);
fs/jfs/jfs_logmgr.c
2169
static void lbmIODone(struct bio *bio)
fs/jfs/jfs_logmgr.c
2171
struct lbuf *bp = bio->bi_private;
fs/jfs/jfs_logmgr.c
2185
if (bio->bi_status) {
fs/jfs/jfs_logmgr.c
2191
bio_put(bio);
fs/jfs/jfs_metapage.c
357
static void metapage_read_end_io(struct bio *bio)
fs/jfs/jfs_metapage.c
359
struct folio *folio = bio->bi_private;
fs/jfs/jfs_metapage.c
361
dec_io(folio, bio->bi_status, last_read_complete);
fs/jfs/jfs_metapage.c
362
bio_put(bio);
fs/jfs/jfs_metapage.c
413
static void metapage_write_end_io(struct bio *bio)
fs/jfs/jfs_metapage.c
415
struct folio *folio = bio->bi_private;
fs/jfs/jfs_metapage.c
419
dec_io(folio, bio->bi_status, last_write_complete);
fs/jfs/jfs_metapage.c
420
bio_put(bio);
fs/jfs/jfs_metapage.c
426
struct bio *bio = NULL;
fs/jfs/jfs_metapage.c
470
if (bio) {
fs/jfs/jfs_metapage.c
479
bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
fs/jfs/jfs_metapage.c
485
if (!bio->bi_iter.bi_size)
fs/jfs/jfs_metapage.c
487
submit_bio(bio);
fs/jfs/jfs_metapage.c
489
bio = NULL;
fs/jfs/jfs_metapage.c
505
bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
fs/jfs/jfs_metapage.c
506
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
fs/jfs/jfs_metapage.c
507
bio->bi_end_io = metapage_write_end_io;
fs/jfs/jfs_metapage.c
508
bio->bi_private = folio;
fs/jfs/jfs_metapage.c
517
if (bio) {
fs/jfs/jfs_metapage.c
518
bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
fs/jfs/jfs_metapage.c
519
if (!bio->bi_iter.bi_size)
fs/jfs/jfs_metapage.c
522
submit_bio(bio);
fs/jfs/jfs_metapage.c
539
4, bio, sizeof(*bio), 0);
fs/jfs/jfs_metapage.c
540
bio_put(bio);
fs/jfs/jfs_metapage.c
567
struct bio *bio = NULL;
fs/jfs/jfs_metapage.c
588
if (bio)
fs/jfs/jfs_metapage.c
589
submit_bio(bio);
fs/jfs/jfs_metapage.c
591
bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
fs/jfs/jfs_metapage.c
593
bio->bi_iter.bi_sector =
fs/jfs/jfs_metapage.c
595
bio->bi_end_io = metapage_read_end_io;
fs/jfs/jfs_metapage.c
596
bio->bi_private = folio;
fs/jfs/jfs_metapage.c
599
bio_add_folio_nofail(bio, folio, len, offset);
fs/jfs/jfs_metapage.c
604
if (bio)
fs/jfs/jfs_metapage.c
605
submit_bio(bio);
fs/mpage.c
132
struct bio *bio;
fs/mpage.c
282
if (args->bio && (args->last_block_in_bio != first_block - 1))
fs/mpage.c
283
args->bio = mpage_bio_submit_read(args->bio);
fs/mpage.c
286
if (args->bio == NULL) {
fs/mpage.c
287
args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
fs/mpage.c
289
if (args->bio == NULL)
fs/mpage.c
291
args->bio->bi_iter.bi_sector = first_block << (blkbits - 9);
fs/mpage.c
295
if (!bio_add_folio(args->bio, folio, length, 0)) {
fs/mpage.c
296
args->bio = mpage_bio_submit_read(args->bio);
fs/mpage.c
304
args->bio = mpage_bio_submit_read(args->bio);
fs/mpage.c
311
if (args->bio)
fs/mpage.c
312
args->bio = mpage_bio_submit_read(args->bio);
fs/mpage.c
379
if (args.bio)
fs/mpage.c
380
mpage_bio_submit_read(args.bio);
fs/mpage.c
396
if (args.bio)
fs/mpage.c
397
mpage_bio_submit_read(args.bio);
fs/mpage.c
420
struct bio *bio;
fs/mpage.c
457
struct bio *bio = mpd->bio;
fs/mpage.c
46
static void mpage_read_end_io(struct bio *bio)
fs/mpage.c
49
int err = blk_status_to_errno(bio->bi_status);
fs/mpage.c
51
bio_for_each_folio_all(fi, bio)
fs/mpage.c
54
bio_put(bio);
fs/mpage.c
57
static void mpage_write_end_io(struct bio *bio)
fs/mpage.c
593
if (bio && mpd->last_block_in_bio != first_block - 1)
fs/mpage.c
594
bio = mpage_bio_submit_write(bio);
fs/mpage.c
597
if (bio == NULL) {
fs/mpage.c
598
bio = bio_alloc(bdev, BIO_MAX_VECS,
fs/mpage.c
60
int err = blk_status_to_errno(bio->bi_status);
fs/mpage.c
601
bio->bi_iter.bi_sector = first_block << (blkbits - 9);
fs/mpage.c
602
wbc_init_bio(wbc, bio);
fs/mpage.c
603
bio->bi_write_hint = inode->i_write_hint;
fs/mpage.c
613
if (!bio_add_folio(bio, folio, length, 0)) {
fs/mpage.c
614
bio = mpage_bio_submit_write(bio);
fs/mpage.c
62
bio_for_each_folio_all(fi, bio) {
fs/mpage.c
624
bio = mpage_bio_submit_write(bio);
fs/mpage.c
635
if (bio)
fs/mpage.c
636
bio = mpage_bio_submit_write(bio);
fs/mpage.c
644
mpd->bio = bio;
fs/mpage.c
68
bio_put(bio);
fs/mpage.c
689
if (mpd.bio)
fs/mpage.c
690
mpage_bio_submit_write(mpd.bio);
fs/mpage.c
71
static struct bio *mpage_bio_submit_read(struct bio *bio)
fs/mpage.c
73
bio->bi_end_io = mpage_read_end_io;
fs/mpage.c
74
guard_bio_eod(bio);
fs/mpage.c
75
submit_bio(bio);
fs/mpage.c
79
static struct bio *mpage_bio_submit_write(struct bio *bio)
fs/mpage.c
81
bio->bi_end_io = mpage_write_end_io;
fs/mpage.c
82
guard_bio_eod(bio);
fs/mpage.c
83
submit_bio(bio);
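The fs/mpage.c entries show the submit-wrapper idiom: one helper per direction sets bi_end_io, clips the bio at end-of-device with guard_bio_eod(), submits, and returns NULL, so call sites can reset their pointer in one step (args->bio = mpage_bio_submit_read(args->bio)). A sketch of the read-side wrapper; demo_submit_read and demo_read_end_io are hypothetical stand-ins:

#include <linux/bio.h>

static void demo_read_end_io(struct bio *bio)
{
	/* per-folio read-completion handling would go here */
	bio_put(bio);
}

static struct bio *demo_submit_read(struct bio *bio)
{
	bio->bi_end_io = demo_read_end_io;
	guard_bio_eod(bio);	/* trim any part past end-of-device */
	submit_bio(bio);
	return NULL;		/* caller: bio = demo_submit_read(bio); */
}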
fs/nfs/blocklayout/blocklayout.c
104
static struct bio *
fs/nfs/blocklayout/blocklayout.c
105
bl_submit_bio(struct bio *bio)
fs/nfs/blocklayout/blocklayout.c
107
if (bio) {
fs/nfs/blocklayout/blocklayout.c
108
get_parallel(bio->bi_private);
fs/nfs/blocklayout/blocklayout.c
110
bio_op(bio) == READ ? "read" : "write",
fs/nfs/blocklayout/blocklayout.c
111
bio->bi_iter.bi_size,
fs/nfs/blocklayout/blocklayout.c
112
(unsigned long long)bio->bi_iter.bi_sector);
fs/nfs/blocklayout/blocklayout.c
113
submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
123
static struct bio *
fs/nfs/blocklayout/blocklayout.c
124
do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect,
fs/nfs/blocklayout/blocklayout.c
145
bio = bl_submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
156
if (!bio) {
fs/nfs/blocklayout/blocklayout.c
157
bio = bio_alloc(map->bdev, bio_max_segs(npg), op, GFP_NOIO);
fs/nfs/blocklayout/blocklayout.c
158
bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT;
fs/nfs/blocklayout/blocklayout.c
159
bio->bi_end_io = end_io;
fs/nfs/blocklayout/blocklayout.c
160
bio->bi_private = par;
fs/nfs/blocklayout/blocklayout.c
162
if (bio_add_page(bio, page, *len, offset) < *len) {
fs/nfs/blocklayout/blocklayout.c
163
bio = bl_submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
166
return bio;
fs/nfs/blocklayout/blocklayout.c
192
static void bl_end_io_read(struct bio *bio)
fs/nfs/blocklayout/blocklayout.c
194
struct parallel_io *par = bio->bi_private;
fs/nfs/blocklayout/blocklayout.c
196
if (bio->bi_status) {
fs/nfs/blocklayout/blocklayout.c
205
bio_put(bio);
fs/nfs/blocklayout/blocklayout.c
234
struct bio *bio = NULL;
fs/nfs/blocklayout/blocklayout.c
263
bio = bl_submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
284
bio = bl_submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
292
bio = do_add_page_to_bio(bio,
fs/nfs/blocklayout/blocklayout.c
298
if (IS_ERR(bio)) {
fs/nfs/blocklayout/blocklayout.c
299
header->pnfs_error = PTR_ERR(bio);
fs/nfs/blocklayout/blocklayout.c
300
bio = NULL;
fs/nfs/blocklayout/blocklayout.c
317
bl_submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
323
static void bl_end_io_write(struct bio *bio)
fs/nfs/blocklayout/blocklayout.c
325
struct parallel_io *par = bio->bi_private;
fs/nfs/blocklayout/blocklayout.c
328
if (bio->bi_status) {
fs/nfs/blocklayout/blocklayout.c
334
bio_put(bio);
fs/nfs/blocklayout/blocklayout.c
379
struct bio *bio = NULL;
fs/nfs/blocklayout/blocklayout.c
411
bio = bl_submit_bio(bio);
fs/nfs/blocklayout/blocklayout.c
422
bio = do_add_page_to_bio(bio, header->page_array.npages - i,
fs/nfs/blocklayout/blocklayout.c
425
if (IS_ERR(bio)) {
fs/nfs/blocklayout/blocklayout.c
426
header->pnfs_error = PTR_ERR(bio);
fs/nfs/blocklayout/blocklayout.c
427
bio = NULL;
fs/nfs/blocklayout/blocklayout.c
439
bl_submit_bio(bio);
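Two conventions recur in the fs/nfs/blocklayout/blocklayout.c entries: a NULL-tolerant submit helper, so loops can unconditionally flush whatever is pending, and an add-page path that can return ERR_PTR() values which callers record in header->pnfs_error before clearing the pointer. A sketch of the first; demo_submit is a hypothetical name:

#include <linux/bio.h>

static struct bio *demo_submit(struct bio *bio)
{
	if (bio)
		submit_bio(bio);
	return NULL;	/* caller: bio = demo_submit(bio); */
}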
fs/nilfs2/segbuf.c
22
struct bio *bio;
fs/nilfs2/segbuf.c
337
static void nilfs_end_bio_write(struct bio *bio)
fs/nilfs2/segbuf.c
339
struct nilfs_segment_buffer *segbuf = bio->bi_private;
fs/nilfs2/segbuf.c
341
if (bio->bi_status)
fs/nilfs2/segbuf.c
344
bio_put(bio);
fs/nilfs2/segbuf.c
351
struct bio *bio = wi->bio;
fs/nilfs2/segbuf.c
353
bio->bi_end_io = nilfs_end_bio_write;
fs/nilfs2/segbuf.c
354
bio->bi_private = segbuf;
fs/nilfs2/segbuf.c
355
submit_bio(bio);
fs/nilfs2/segbuf.c
358
wi->bio = NULL;
fs/nilfs2/segbuf.c
368
wi->bio = NULL;
fs/nilfs2/segbuf.c
384
if (!wi->bio) {
fs/nilfs2/segbuf.c
385
wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs,
fs/nilfs2/segbuf.c
387
wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) <<
fs/nilfs2/segbuf.c
391
if (bio_add_folio(wi->bio, bh->b_folio, bh->b_size,
fs/nilfs2/segbuf.c
433
if (wi.bio) {
fs/nilfs2/segbuf.c
438
wi.bio->bi_opf |= REQ_SYNC;
fs/ntfs3/fsntfs.c
1604
struct bio *new, *bio = NULL;
fs/ntfs3/fsntfs.c
1636
if (bio) {
fs/ntfs3/fsntfs.c
1637
bio_chain(bio, new);
fs/ntfs3/fsntfs.c
1638
submit_bio(bio);
fs/ntfs3/fsntfs.c
1640
bio = new;
fs/ntfs3/fsntfs.c
1641
bio->bi_iter.bi_sector = lbo >> 9;
fs/ntfs3/fsntfs.c
1646
if (bio_add_page(bio, fill, add, 0) < add)
fs/ntfs3/fsntfs.c
1657
err = submit_bio_wait(bio);
fs/ntfs3/fsntfs.c
1658
bio_put(bio);
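The fs/ntfs3/fsntfs.c entries show bio_chain() used to stream an arbitrarily long run through fixed-size bios: each full bio is chained to its freshly allocated successor and submitted, and only the final bio is waited on, which completes once the whole chain has. A sketch under stated assumptions — demo_fill_run is hypothetical, one page is written per bio, and runs is assumed to be at least 1:

#include <linux/bio.h>

static int demo_fill_run(struct block_device *bdev, sector_t sector,
			 struct page *fill, unsigned int runs)
{
	struct bio *new, *bio = NULL;
	int err;

	while (runs--) {
		new = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);	/* new completes only after bio */
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = sector;
		__bio_add_page(bio, fill, PAGE_SIZE, 0);
		sector += PAGE_SIZE >> 9;
	}

	err = submit_bio_wait(bio);	/* waits for the entire chain */
	bio_put(bio);
	return err;
}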
fs/ntfs3/inode.c
581
static void ntfs_iomap_read_end_io(struct bio *bio)
fs/ntfs3/inode.c
583
int error = blk_status_to_errno(bio->bi_status);
fs/ntfs3/inode.c
586
bio_for_each_folio_all(fi, bio) {
fs/ntfs3/inode.c
605
bio_put(bio);
fs/ntfs3/inode.c
621
struct bio *bio = ctx->read_ctx;
fs/ntfs3/inode.c
624
if (!bio || bio_end_sector(bio) != sector ||
fs/ntfs3/inode.c
625
!bio_add_folio(bio, folio, plen, poff)) {
fs/ntfs3/inode.c
630
if (bio)
fs/ntfs3/inode.c
631
submit_bio(bio);
fs/ntfs3/inode.c
635
bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
fs/ntfs3/inode.c
642
if (!bio)
fs/ntfs3/inode.c
643
bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
fs/ntfs3/inode.c
645
bio->bi_opf |= REQ_RAHEAD;
fs/ntfs3/inode.c
646
bio->bi_iter.bi_sector = sector;
fs/ntfs3/inode.c
647
bio->bi_end_io = ntfs_iomap_read_end_io;
fs/ntfs3/inode.c
648
bio_add_folio_nofail(bio, folio, plen, poff);
fs/ntfs3/inode.c
649
ctx->read_ctx = bio;
fs/ntfs3/inode.c
656
struct bio *bio = ctx->read_ctx;
fs/ntfs3/inode.c
658
if (bio)
fs/ntfs3/inode.c
659
submit_bio(bio);
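The fs/ntfs3/inode.c readahead entries use a merge-or-start-new test: keep extending the pending bio only while the next folio range is physically contiguous (bio_end_sector() matches the next sector) and bio_add_folio() still succeeds; otherwise submit it and open a fresh one. A simplified sketch (the real code also falls back to a single-vec allocation); demo_readahead_range is hypothetical:

#include <linux/bio.h>

static struct bio *demo_readahead_range(struct bio *bio,
					struct block_device *bdev,
					struct folio *folio, size_t plen,
					size_t poff, sector_t sector,
					bio_end_io_t *end_io)
{
	if (!bio || bio_end_sector(bio) != sector ||
	    !bio_add_folio(bio, folio, plen, poff)) {
		if (bio)
			submit_bio(bio);
		bio = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
		bio->bi_opf |= REQ_RAHEAD;	/* advisory readahead */
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = end_io;
		bio_add_folio_nofail(bio, folio, plen, poff);
	}
	return bio;	/* caller stores this as the new pending bio */
}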
fs/ocfs2/cluster/heartbeat.c
494
static void o2hb_bio_end_io(struct bio *bio)
fs/ocfs2/cluster/heartbeat.c
496
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
fs/ocfs2/cluster/heartbeat.c
498
if (bio->bi_status) {
fs/ocfs2/cluster/heartbeat.c
499
mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
fs/ocfs2/cluster/heartbeat.c
500
wc->wc_error = blk_status_to_errno(bio->bi_status);
fs/ocfs2/cluster/heartbeat.c
504
bio_put(bio);
fs/ocfs2/cluster/heartbeat.c
509
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
fs/ocfs2/cluster/heartbeat.c
519
struct bio *bio;
fs/ocfs2/cluster/heartbeat.c
526
bio = bio_alloc(reg_bdev(reg), 16, opf, GFP_ATOMIC);
fs/ocfs2/cluster/heartbeat.c
527
if (!bio) {
fs/ocfs2/cluster/heartbeat.c
529
bio = ERR_PTR(-ENOMEM);
fs/ocfs2/cluster/heartbeat.c
534
bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
fs/ocfs2/cluster/heartbeat.c
535
bio->bi_private = wc;
fs/ocfs2/cluster/heartbeat.c
536
bio->bi_end_io = o2hb_bio_end_io;
fs/ocfs2/cluster/heartbeat.c
549
len = bio_add_page(bio, page, vec_len, vec_start);
fs/ocfs2/cluster/heartbeat.c
558
return bio;
fs/ocfs2/cluster/heartbeat.c
568
struct bio *bio;
fs/ocfs2/cluster/heartbeat.c
573
bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
fs/ocfs2/cluster/heartbeat.c
575
if (IS_ERR(bio)) {
fs/ocfs2/cluster/heartbeat.c
576
status = PTR_ERR(bio);
fs/ocfs2/cluster/heartbeat.c
582
submit_bio(bio);
fs/ocfs2/cluster/heartbeat.c
600
struct bio *bio;
fs/ocfs2/cluster/heartbeat.c
606
bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1,
fs/ocfs2/cluster/heartbeat.c
608
if (IS_ERR(bio)) {
fs/ocfs2/cluster/heartbeat.c
609
status = PTR_ERR(bio);
fs/ocfs2/cluster/heartbeat.c
615
submit_bio(bio);
fs/squashfs/block.c
122
if (!bio || idx != end_idx) {
fs/squashfs/block.c
123
struct bio *new = bio_alloc_clone(bdev, fullbio,
fs/squashfs/block.c
126
if (bio) {
fs/squashfs/block.c
127
bio_trim(bio, start_idx * PAGE_SECTORS,
fs/squashfs/block.c
129
bio_chain(bio, new);
fs/squashfs/block.c
130
submit_bio(bio);
fs/squashfs/block.c
133
bio = new;
fs/squashfs/block.c
141
if (bio) {
fs/squashfs/block.c
142
bio_trim(bio, start_idx * PAGE_SECTORS,
fs/squashfs/block.c
144
err = submit_bio_wait(bio);
fs/squashfs/block.c
145
bio_put(bio);
fs/squashfs/block.c
217
struct bio **biop, int *block_offset)
fs/squashfs/block.c
229
struct bio *bio;
fs/squashfs/block.c
231
bio = bio_kmalloc(page_count, GFP_NOIO);
fs/squashfs/block.c
232
if (!bio)
fs/squashfs/block.c
234
bio_init_inline(bio, sb->s_bdev, page_count, REQ_OP_READ);
fs/squashfs/block.c
235
bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
fs/squashfs/block.c
256
__bio_add_page(bio, page, len, offset);
fs/squashfs/block.c
262
error = squashfs_bio_read_cached(bio, cache_mapping, index,
fs/squashfs/block.c
266
error = submit_bio_wait(bio);
fs/squashfs/block.c
270
*biop = bio;
fs/squashfs/block.c
275
bio_free_pages(bio);
fs/squashfs/block.c
276
bio_uninit(bio);
fs/squashfs/block.c
277
kfree(bio);
fs/squashfs/block.c
294
struct bio *bio = NULL;
fs/squashfs/block.c
319
res = squashfs_bio_read(sb, index, 2, &bio, &offset);
fs/squashfs/block.c
323
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
fs/squashfs/block.c
33
static int copy_bio_to_actor(struct bio *bio,
fs/squashfs/block.c
333
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
fs/squashfs/block.c
340
bio_free_pages(bio);
fs/squashfs/block.c
341
bio_uninit(bio);
fs/squashfs/block.c
342
kfree(bio);
fs/squashfs/block.c
360
res = squashfs_bio_read(sb, index, length, &bio, &offset);
fs/squashfs/block.c
369
res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
fs/squashfs/block.c
371
res = copy_bio_to_actor(bio, output, offset, length);
fs/squashfs/block.c
375
bio_free_pages(bio);
fs/squashfs/block.c
376
bio_uninit(bio);
fs/squashfs/block.c
377
kfree(bio);
fs/squashfs/block.c
46
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
fs/squashfs/block.c
70
if (!bio_next_segment(bio, &iter_all))
fs/squashfs/block.c
79
static int squashfs_bio_read_cached(struct bio *fullbio,
fs/squashfs/block.c
87
struct bio *bio = NULL;
fs/squashfs/decompressor.h
20
struct bio *, int, int, struct squashfs_page_actor *);
fs/squashfs/decompressor_multi.c
182
static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
fs/squashfs/decompressor_multi.c
190
bio, offset, length, output);
fs/squashfs/decompressor_multi_percpu.c
77
static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
fs/squashfs/decompressor_multi_percpu.c
88
res = msblk->decompressor->decompress(msblk, stream->stream, bio,
fs/squashfs/decompressor_single.c
62
static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
fs/squashfs/decompressor_single.c
70
res = msblk->decompressor->decompress(msblk, stream->stream, bio,
fs/squashfs/lz4_wrapper.c
101
while (bio_next_segment(bio, &iter_all)) {
fs/squashfs/lz4_wrapper.c
92
struct bio *bio, int offset, int length,
fs/squashfs/lzo_wrapper.c
66
struct bio *bio, int offset, int length,
fs/squashfs/lzo_wrapper.c
76
while (bio_next_segment(bio, &iter_all)) {
fs/squashfs/squashfs.h
51
int (*decompress)(struct squashfs_sb_info *msblk, struct bio *bio,
fs/squashfs/xz_wrapper.c
120
struct bio *bio, int offset, int length,
fs/squashfs/xz_wrapper.c
146
if (!bio_next_segment(bio, &iter_all)) {
fs/squashfs/zlib_wrapper.c
53
struct bio *bio, int offset, int length,
fs/squashfs/zlib_wrapper.c
77
if (!bio_next_segment(bio, &iter_all)) {
fs/squashfs/zstd_wrapper.c
62
struct bio *bio, int offset, int length,
fs/squashfs/zstd_wrapper.c
95
if (!bio_next_segment(bio, &iter_all)) {
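The fs/squashfs/block.c entries illustrate the bio_kmalloc() lifecycle, which differs from bio_alloc(): the bio is initialized in place over inline vectors with bio_init_inline(), and is torn down with bio_uninit() plus kfree() rather than bio_put(). A synchronous-read sketch; demo_read_sync is hypothetical and the per-page offsets of the real code are simplified away:

#include <linux/bio.h>
#include <linux/slab.h>

static int demo_read_sync(struct block_device *bdev, sector_t sector,
			  struct page **pages, unsigned int page_count)
{
	struct bio *bio;
	unsigned int i;
	int err;

	bio = bio_kmalloc(page_count, GFP_NOIO);
	if (!bio)
		return -ENOMEM;
	bio_init_inline(bio, bdev, page_count, REQ_OP_READ);
	bio->bi_iter.bi_sector = sector;

	for (i = 0; i < page_count; i++)
		__bio_add_page(bio, pages[i], PAGE_SIZE, 0);

	err = submit_bio_wait(bio);

	bio_uninit(bio);	/* pairs with bio_kmalloc(); no bio_put() */
	kfree(bio);
	return err;
}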
fs/verity/verify.c
448
void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio)
fs/verity/verify.c
455
bio_for_each_folio_all(fi, bio) {
fs/verity/verify.c
467
bio->bi_status = BLK_STS_IOERR;
fs/xfs/xfs_aops.c
216
struct bio *bio)
fs/xfs/xfs_aops.c
218
struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
fs/xfs/xfs_aops.c
227
if (IS_ENABLED(CONFIG_XFS_RT) && bio_is_zone_append(bio)) {
fs/xfs/xfs_aops.c
228
ioend->io_sector = bio->bi_iter.bi_sector;
fs/xfs/xfs_aops.h
13
void xfs_end_bio(struct bio *bio);
fs/xfs/xfs_bio_io.c
23
struct bio *bio;
fs/xfs/xfs_bio_io.c
29
bio = bio_alloc(bdev, bio_max_vecs(count), op, GFP_KERNEL);
fs/xfs/xfs_bio_io.c
30
bio->bi_iter.bi_sector = sector;
fs/xfs/xfs_bio_io.c
33
added = bio_add_vmalloc_chunk(bio, data + done, count - done);
fs/xfs/xfs_bio_io.c
35
struct bio *prev = bio;
fs/xfs/xfs_bio_io.c
37
bio = bio_alloc(prev->bi_bdev,
fs/xfs/xfs_bio_io.c
40
bio->bi_iter.bi_sector = bio_end_sector(prev);
fs/xfs/xfs_bio_io.c
41
bio_chain(prev, bio);
fs/xfs/xfs_bio_io.c
47
error = submit_bio_wait(bio);
fs/xfs/xfs_bio_io.c
48
bio_put(bio);
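The fs/xfs/xfs_bio_io.c entries apply the same chaining idea to a vmalloc() buffer: chunks are added with bio_add_vmalloc_chunk(), and when a bio fills up, a successor is allocated starting at bio_end_sector() of the previous one, chained, and the predecessor submitted. A sketch assuming the chunk helper reports a full bio by returning 0 (the exact condition is not visible in the entries); demo_rw_vmalloc is hypothetical:

#include <linux/bio.h>

static int demo_rw_vmalloc(struct block_device *bdev, void *data,
			   unsigned int count, sector_t sector, blk_opf_t op)
{
	unsigned int done = 0, added;
	struct bio *bio;
	int err;

	bio = bio_alloc(bdev, bio_max_segs(DIV_ROUND_UP(count, PAGE_SIZE)),
			op, GFP_KERNEL);
	bio->bi_iter.bi_sector = sector;

	do {
		added = bio_add_vmalloc_chunk(bio, data + done, count - done);
		if (!added) {		/* current bio full: chain a new one */
			struct bio *prev = bio;

			bio = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, op,
					GFP_KERNEL);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_chain(prev, bio);
			submit_bio(prev);
		}
		done += added;
	} while (done < count);

	err = submit_bio_wait(bio);	/* completes after all chained bios */
	bio_put(bio);
	return err;
}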
fs/xfs/xfs_buf.c
1292
struct bio *bio)
fs/xfs/xfs_buf.c
1294
struct xfs_buf *bp = bio->bi_private;
fs/xfs/xfs_buf.c
1296
if (bio->bi_status)
fs/xfs/xfs_buf.c
1297
xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status));
fs/xfs/xfs_buf.c
1309
bio_put(bio);
fs/xfs/xfs_buf.c
1337
struct bio *bio;
fs/xfs/xfs_buf.c
1339
bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, xfs_buf_bio_op(bp),
fs/xfs/xfs_buf.c
1342
bio_add_vmalloc(bio, bp->b_addr, len);
fs/xfs/xfs_buf.c
1344
bio_add_virt_nofail(bio, bp->b_addr, len);
fs/xfs/xfs_buf.c
1345
bio->bi_private = bp;
fs/xfs/xfs_buf.c
1346
bio->bi_end_io = xfs_buf_bio_end_io;
fs/xfs/xfs_buf.c
1355
struct bio *split;
fs/xfs/xfs_buf.c
1357
split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS,
fs/xfs/xfs_buf.c
1360
bio_chain(split, bio);
fs/xfs/xfs_buf.c
1363
bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
fs/xfs/xfs_buf.c
1364
submit_bio(bio);
fs/xfs/xfs_discard.c
103
bio_put(bio);
fs/xfs/xfs_discard.c
117
struct bio *bio = NULL;
fs/xfs/xfs_discard.c
131
GFP_KERNEL, &bio);
fs/xfs/xfs_discard.c
134
if (bio) {
fs/xfs/xfs_discard.c
135
bio->bi_private = extents;
fs/xfs/xfs_discard.c
136
bio->bi_end_io = xfs_discard_endio;
fs/xfs/xfs_discard.c
137
submit_bio(bio);
fs/xfs/xfs_discard.c
473
struct bio *bio = NULL;
fs/xfs/xfs_discard.c
489
GFP_NOFS, &bio);
fs/xfs/xfs_discard.c
493
if (bio) {
fs/xfs/xfs_discard.c
494
error = submit_bio_wait(bio);
fs/xfs/xfs_discard.c
503
bio_put(bio);
fs/xfs/xfs_discard.c
97
struct bio *bio)
fs/xfs/xfs_discard.c
99
struct xfs_busy_extents *extents = bio->bi_private;
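The fs/xfs/xfs_discard.c entries use the *biop variant of the discard helpers: __blkdev_issue_discard() builds (and may internally chain) the discard bios, hands the final unsubmitted one back through the out parameter, and the caller decorates it with its own bi_end_io/bi_private before submitting. A sketch; demo_discard and demo_discard_endio are hypothetical:

#include <linux/blkdev.h>

static void demo_discard_endio(struct bio *bio)
{
	/* e.g. release the busy-extent state stashed in bi_private */
	bio_put(bio);
}

static int demo_discard(struct block_device *bdev, sector_t start,
			sector_t nr_sects, void *priv)
{
	struct bio *bio = NULL;
	int error;

	error = __blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, &bio);
	if (bio) {
		bio->bi_private = priv;
		bio->bi_end_io = demo_discard_endio;
		submit_bio(bio);	/* completion runs demo_discard_endio */
	}
	return error;
}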
fs/xfs/xfs_file.c
235
struct bio *bio,
fs/xfs/xfs_file.c
238
iomap_init_ioend(iter->inode, bio, file_offset, IOMAP_IOEND_DIRECT);
fs/xfs/xfs_file.c
239
bio->bi_end_io = xfs_end_bio;
fs/xfs/xfs_file.c
240
submit_bio(bio);
fs/xfs/xfs_file.c
663
struct bio *bio,
fs/xfs/xfs_file.c
671
count_fsb = XFS_B_TO_FSB(mp, bio->bi_iter.bi_size);
fs/xfs/xfs_file.c
677
bio_io_error(bio);
fs/xfs/xfs_file.c
682
bio->bi_end_io = xfs_end_bio;
fs/xfs/xfs_file.c
683
ioend = iomap_init_ioend(iter->inode, bio, file_offset,
fs/xfs/xfs_log.c
1533
struct bio *bio)
fs/xfs/xfs_log.c
1535
struct xlog_in_core *iclog = bio->bi_private;
fs/xfs/xfs_log.c
1618
struct bio *split;
fs/xfs/xfs_log_priv.h
223
struct bio ic_bio;
fs/xfs/xfs_verify_media.c
269
struct bio *bio;
fs/xfs/xfs_verify_media.c
335
bio = bio_alloc(btp->bt_bdev, 1, REQ_OP_READ, GFP_KERNEL);
fs/xfs/xfs_verify_media.c
336
if (!bio) {
fs/xfs/xfs_verify_media.c
345
bio_reset(bio, btp->bt_bdev, REQ_OP_READ);
fs/xfs/xfs_verify_media.c
346
bio->bi_iter.bi_sector = daddr;
fs/xfs/xfs_verify_media.c
347
bio_add_folio_nofail(bio, folio,
fs/xfs/xfs_verify_media.c
356
bio_bbcount = bio->bi_iter.bi_size >> SECTOR_SHIFT;
fs/xfs/xfs_verify_media.c
357
submit_bio_wait(bio);
fs/xfs/xfs_verify_media.c
358
bio_status = bio->bi_status;
fs/xfs/xfs_verify_media.c
389
bio_put(bio);
fs/xfs/xfs_zone_gc.c
109
struct bio bio; /* must be last */
fs/xfs/xfs_zone_gc.c
208
if (bioset_init(&data->bio_set, 16, offsetof(struct xfs_gc_bio, bio),
fs/xfs/xfs_zone_gc.c
583
struct bio *bio)
fs/xfs/xfs_zone_gc.c
586
container_of(bio, struct xfs_gc_bio, bio);
fs/xfs/xfs_zone_gc.c
650
bio_add_folio_nofail(&chunk->bio,
fs/xfs/xfs_zone_gc.c
671
struct bio *bio;
fs/xfs/xfs_zone_gc.c
689
bio = bio_alloc_bioset(bdev,
fs/xfs/xfs_zone_gc.c
693
chunk = container_of(bio, struct xfs_gc_bio, bio);
fs/xfs/xfs_zone_gc.c
707
bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);
fs/xfs/xfs_zone_gc.c
708
bio->bi_end_io = xfs_zone_gc_end_io;
fs/xfs/xfs_zone_gc.c
719
submit_bio(bio);
fs/xfs/xfs_zone_gc.c
732
bio_put(&chunk->bio);
fs/xfs/xfs_zone_gc.c
741
chunk->bio.bi_opf &= ~REQ_OP_WRITE;
fs/xfs/xfs_zone_gc.c
742
chunk->bio.bi_opf |= REQ_OP_ZONE_APPEND;
fs/xfs/xfs_zone_gc.c
744
chunk->bio.bi_iter.bi_sector = chunk->new_daddr;
fs/xfs/xfs_zone_gc.c
745
chunk->bio.bi_end_io = xfs_zone_gc_end_io;
fs/xfs/xfs_zone_gc.c
746
submit_bio(&chunk->bio);
fs/xfs/xfs_zone_gc.c
755
&bdev_get_queue(chunk->bio.bi_bdev)->limits;
fs/xfs/xfs_zone_gc.c
759
struct bio *split;
fs/xfs/xfs_zone_gc.c
765
split_sectors = bio_split_rw_at(&chunk->bio, lim, &nsegs,
fs/xfs/xfs_zone_gc.c
775
split = bio_split(&chunk->bio, split_sectors, GFP_NOFS, &data->bio_set);
fs/xfs/xfs_zone_gc.c
776
split_chunk = container_of(split, struct xfs_gc_bio, bio);
fs/xfs/xfs_zone_gc.c
810
if (chunk->bio.bi_status)
fs/xfs/xfs_zone_gc.c
823
bio_reuse(&chunk->bio, REQ_OP_WRITE);
fs/xfs/xfs_zone_gc.c
839
if (chunk->bio.bi_status)
fs/xfs/xfs_zone_gc.c
867
chunk->new_daddr = chunk->bio.bi_iter.bi_sector;
fs/xfs/xfs_zone_gc.c
880
struct xfs_rtgroup *rtg = chunk->bio.bi_private;
fs/xfs/xfs_zone_gc.c
884
if (chunk->bio.bi_status) {
fs/xfs/xfs_zone_gc.c
897
bio_put(&chunk->bio);
fs/xfs/xfs_zone_gc.c
903
struct bio *bio)
fs/xfs/xfs_zone_gc.c
912
bio_io_error(bio);
fs/xfs/xfs_zone_gc.c
918
bio->bi_iter.bi_sector = xfs_gbno_to_daddr(rtg_group(rtg), 0);
fs/xfs/xfs_zone_gc.c
919
if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {
fs/xfs/xfs_zone_gc.c
924
if (!bdev_max_discard_sectors(bio->bi_bdev)) {
fs/xfs/xfs_zone_gc.c
925
bio_endio(bio);
fs/xfs/xfs_zone_gc.c
928
bio->bi_opf &= ~REQ_OP_ZONE_RESET;
fs/xfs/xfs_zone_gc.c
929
bio->bi_opf |= REQ_OP_DISCARD;
fs/xfs/xfs_zone_gc.c
930
bio->bi_iter.bi_size = XFS_FSB_TO_B(mp, rtg_blocks(rtg));
fs/xfs/xfs_zone_gc.c
933
submit_bio(bio);
fs/xfs/xfs_zone_gc.c
936
static void xfs_bio_wait_endio(struct bio *bio)
fs/xfs/xfs_zone_gc.c
938
complete(bio->bi_private);
fs/xfs/xfs_zone_gc.c
946
struct bio bio;
fs/xfs/xfs_zone_gc.c
949
bio_init(&bio, rtg_mount(rtg)->m_rtdev_targp->bt_bdev, NULL, 0,
fs/xfs/xfs_zone_gc.c
951
bio.bi_private = &done;
fs/xfs/xfs_zone_gc.c
952
bio.bi_end_io = xfs_bio_wait_endio;
fs/xfs/xfs_zone_gc.c
953
xfs_submit_zone_reset_bio(rtg, &bio);
fs/xfs/xfs_zone_gc.c
956
error = blk_status_to_errno(bio.bi_status);
fs/xfs/xfs_zone_gc.c
957
bio_uninit(&bio);
fs/xfs/xfs_zone_gc.c
976
struct bio *bio;
fs/xfs/xfs_zone_gc.c
983
bio = bio_alloc_bioset(rtg_mount(rtg)->m_rtdev_targp->bt_bdev,
fs/xfs/xfs_zone_gc.c
985
bio->bi_private = rtg;
fs/xfs/xfs_zone_gc.c
986
bio->bi_end_io = xfs_zone_gc_end_io;
fs/xfs/xfs_zone_gc.c
988
chunk = container_of(bio, struct xfs_gc_bio, bio);
fs/xfs/xfs_zone_gc.c
992
xfs_submit_zone_reset_bio(rtg, bio);
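The xfs_bio_wait_endio() entries show the manual synchronous pattern for an on-stack bio: bio_init() with no vector table, a completion in bi_private, wait, read bi_status, then bio_uninit() (never bio_put() for stack bios). submit_bio_wait() packages the same steps; here the manual form lets the same reset-submission helper also serve the asynchronous caller seen above. A sketch; demo_zone_reset is hypothetical:

#include <linux/bio.h>
#include <linux/completion.h>

static void demo_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int demo_zone_reset(struct block_device *bdev, sector_t zone_sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio bio;
	int error;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET | REQ_SYNC);
	bio.bi_iter.bi_sector = zone_sector;
	bio.bi_private = &done;
	bio.bi_end_io = demo_wait_endio;
	submit_bio(&bio);

	wait_for_completion(&done);
	error = blk_status_to_errno(bio.bi_status);
	bio_uninit(&bio);	/* on-stack bio: uninit, don't put */
	return error;
}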
include/linux/bio-integrity.h
103
static inline void bio_integrity_unmap_user(struct bio *bio)
include/linux/bio-integrity.h
107
static inline bool bio_integrity_prep(struct bio *bio)
include/linux/bio-integrity.h
112
static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
include/linux/bio-integrity.h
118
static inline void bio_integrity_advance(struct bio *bio,
include/linux/bio-integrity.h
123
static inline void bio_integrity_trim(struct bio *bio)
include/linux/bio-integrity.h
127
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
include/linux/bio-integrity.h
133
bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr)
include/linux/bio-integrity.h
138
static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
include/linux/bio-integrity.h
145
void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer);
include/linux/bio-integrity.h
43
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
include/linux/bio-integrity.h
45
if (bio->bi_opf & REQ_INTEGRITY)
include/linux/bio-integrity.h
46
return bio->bi_integrity;
include/linux/bio-integrity.h
51
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
include/linux/bio-integrity.h
53
struct bio_integrity_payload *bip = bio_integrity(bio);
include/linux/bio-integrity.h
72
void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
include/linux/bio-integrity.h
74
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
include/linux/bio-integrity.h
76
int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
include/linux/bio-integrity.h
78
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
include/linux/bio-integrity.h
79
int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
include/linux/bio-integrity.h
80
void bio_integrity_unmap_user(struct bio *bio);
include/linux/bio-integrity.h
81
bool bio_integrity_prep(struct bio *bio);
include/linux/bio-integrity.h
82
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
include/linux/bio-integrity.h
83
void bio_integrity_trim(struct bio *bio);
include/linux/bio-integrity.h
84
int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
include/linux/bio-integrity.h
88
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
include/linux/bio-integrity.h
93
static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
include/linux/bio-integrity.h
98
static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
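The include/linux/bio-integrity.h entries come in pairs: real prototypes when the feature is built in, and static inline no-op stubs otherwise, so call sites never need #ifdef guards of their own. The shape of that convention, with hypothetical names (CONFIG_DEMO_FEATURE, demo_prepare):

#include <linux/bio.h>

#ifdef CONFIG_DEMO_FEATURE
int demo_prepare(struct bio *bio);
#else
static inline int demo_prepare(struct bio *bio)
{
	return 0;	/* feature compiled out: report success, do nothing */
}
#endif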
include/linux/bio.h
100
bvec_advance(&bio->bi_io_vec[iter->idx], iter);
include/linux/bio.h
108
#define bio_for_each_segment_all(bvl, bio, iter) \
include/linux/bio.h
109
for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
include/linux/bio.h
111
static inline void bio_advance_iter(const struct bio *bio,
include/linux/bio.h
116
if (bio_no_advance_iter(bio))
include/linux/bio.h
119
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
include/linux/bio.h
124
static inline void bio_advance_iter_single(const struct bio *bio,
include/linux/bio.h
130
if (bio_no_advance_iter(bio))
include/linux/bio.h
133
bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
include/linux/bio.h
136
void __bio_advance(struct bio *, unsigned bytes);
include/linux/bio.h
149
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
include/linux/bio.h
151
if (nbytes == bio->bi_iter.bi_size) {
include/linux/bio.h
152
bio->bi_iter.bi_size = 0;
include/linux/bio.h
155
__bio_advance(bio, nbytes);
include/linux/bio.h
158
#define __bio_for_each_segment(bvl, bio, iter, start) \
include/linux/bio.h
161
((bvl = bio_iter_iovec((bio), (iter))), 1); \
include/linux/bio.h
162
bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
include/linux/bio.h
164
#define bio_for_each_segment(bvl, bio, iter) \
include/linux/bio.h
165
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
include/linux/bio.h
167
#define __bio_for_each_bvec(bvl, bio, iter, start) \
include/linux/bio.h
170
((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
include/linux/bio.h
171
bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
include/linux/bio.h
174
#define bio_for_each_bvec(bvl, bio, iter) \
include/linux/bio.h
175
__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
include/linux/bio.h
181
#define bio_for_each_bvec_all(bvl, bio, i) \
include/linux/bio.h
182
for (i = 0, bvl = bio_first_bvec_all(bio); \
include/linux/bio.h
183
i < (bio)->bi_vcnt; i++, bvl++)
include/linux/bio.h
187
static inline unsigned bio_segments(struct bio *bio)
include/linux/bio.h
198
switch (bio_op(bio)) {
include/linux/bio.h
207
bio_for_each_segment(bv, bio, iter)
include/linux/bio.h
227
static inline void bio_get(struct bio *bio)
include/linux/bio.h
229
bio->bi_flags |= (1 << BIO_REFFED);
include/linux/bio.h
23
#define bio_iter_iovec(bio, iter) \
include/linux/bio.h
231
atomic_inc(&bio->__bi_cnt);
include/linux/bio.h
234
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
include/linux/bio.h
237
bio->bi_flags |= (1 << BIO_REFFED);
include/linux/bio.h
24
bvec_iter_bvec((bio)->bi_io_vec, (iter))
include/linux/bio.h
240
atomic_set(&bio->__bi_cnt, count);
include/linux/bio.h
243
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
include/linux/bio.h
245
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
include/linux/bio.h
246
return bio->bi_io_vec;
include/linux/bio.h
249
static inline struct page *bio_first_page_all(struct bio *bio)
include/linux/bio.h
251
return bio_first_bvec_all(bio)->bv_page;
include/linux/bio.h
254
static inline struct folio *bio_first_folio_all(struct bio *bio)
include/linux/bio.h
256
return page_folio(bio_first_page_all(bio));
include/linux/bio.h
26
#define bio_iter_page(bio, iter) \
include/linux/bio.h
27
bvec_iter_page((bio)->bi_io_vec, (iter))
include/linux/bio.h
276
static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
include/linux/bio.h
279
struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
include/linux/bio.h
28
#define bio_iter_len(bio, iter) \
include/linux/bio.h
281
if (unlikely(i >= bio->bi_vcnt)) {
include/linux/bio.h
29
bvec_iter_len((bio)->bi_io_vec, (iter))
include/linux/bio.h
295
static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
include/linux/bio.h
30
#define bio_iter_offset(bio, iter) \
include/linux/bio.h
304
bio_first_folio(fi, bio, fi->_i + 1);
include/linux/bio.h
31
bvec_iter_offset((bio)->bi_io_vec, (iter))
include/linux/bio.h
313
#define bio_for_each_folio_all(fi, bio) \
include/linux/bio.h
314
for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
include/linux/bio.h
316
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
include/linux/bio.h
317
extern struct bio *bio_split(struct bio *bio, int sectors,
include/linux/bio.h
319
int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
include/linux/bio.h
321
u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
include/linux/bio.h
33
#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
include/linux/bio.h
334
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
include/linux/bio.h
337
if (sectors >= bio_sectors(bio))
include/linux/bio.h
338
return bio;
include/linux/bio.h
34
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
include/linux/bio.h
340
return bio_split(bio, sectors, gfp, bs);
include/linux/bio.h
35
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
include/linux/bio.h
352
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
include/linux/bio.h
355
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
include/linux/bio.h
356
extern void bio_put(struct bio *);
include/linux/bio.h
358
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
include/linux/bio.h
360
int bio_init_clone(struct block_device *bdev, struct bio *bio,
include/linux/bio.h
361
struct bio *bio_src, gfp_t gfp);
include/linux/bio.h
365
static inline struct bio *bio_alloc(struct block_device *bdev,
include/linux/bio.h
371
void submit_bio(struct bio *bio);
include/linux/bio.h
373
extern void bio_endio(struct bio *);
include/linux/bio.h
375
static inline void bio_io_error(struct bio *bio)
include/linux/bio.h
377
bio->bi_status = BLK_STS_IOERR;
include/linux/bio.h
378
bio_endio(bio);
include/linux/bio.h
381
static inline void bio_wouldblock_error(struct bio *bio)
include/linux/bio.h
383
bio_set_flag(bio, BIO_QUIET);
include/linux/bio.h
384
bio->bi_status = BLK_STS_AGAIN;
include/linux/bio.h
385
bio_endio(bio);
include/linux/bio.h
40
#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
include/linux/bio.h
41
#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
include/linux/bio.h
425
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
include/linux/bio.h
427
static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
include/linux/bio.h
430
bio_init(bio, bdev, bio_inline_vecs(bio), max_vecs, opf);
include/linux/bio.h
432
extern void bio_uninit(struct bio *);
include/linux/bio.h
433
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
include/linux/bio.h
434
void bio_reuse(struct bio *bio, blk_opf_t opf);
include/linux/bio.h
435
void bio_chain(struct bio *, struct bio *);
include/linux/bio.h
437
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
include/linux/bio.h
439
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
include/linux/bio.h
441
void __bio_add_page(struct bio *bio, struct page *page,
include/linux/bio.h
443
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
include/linux/bio.h
445
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
include/linux/bio.h
46
#define bio_data_dir(bio) \
include/linux/bio.h
462
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
include/linux/bio.h
463
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
include/linux/bio.h
465
int submit_bio_wait(struct bio *bio);
include/linux/bio.h
469
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
include/linux/bio.h
47
(op_is_write(bio_op(bio)) ? WRITE : READ)
include/linux/bio.h
472
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
include/linux/bio.h
473
void __bio_release_pages(struct bio *bio, bool mark_dirty);
include/linux/bio.h
474
extern void bio_set_pages_dirty(struct bio *bio);
include/linux/bio.h
475
extern void bio_check_pages_dirty(struct bio *bio);
include/linux/bio.h
477
int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
include/linux/bio.h
478
void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty);
include/linux/bio.h
480
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
include/linux/bio.h
481
struct bio *src, struct bvec_iter *src_iter);
include/linux/bio.h
482
extern void bio_copy_data(struct bio *dst, struct bio *src);
include/linux/bio.h
483
extern void bio_free_pages(struct bio *bio);
include/linux/bio.h
484
void guard_bio_eod(struct bio *bio);
include/linux/bio.h
485
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
include/linux/bio.h
487
static inline void zero_fill_bio(struct bio *bio)
include/linux/bio.h
489
zero_fill_bio_iter(bio, bio->bi_iter);
include/linux/bio.h
49
static inline bool bio_flagged(const struct bio *bio, unsigned int bit)
include/linux/bio.h
492
static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
include/linux/bio.h
494
if (bio_flagged(bio, BIO_PAGE_PINNED))
include/linux/bio.h
495
__bio_release_pages(bio, mark_dirty);
include/linux/bio.h
498
#define bio_dev(bio) \
include/linux/bio.h
499
disk_devt((bio)->bi_bdev->bd_disk)
include/linux/bio.h
502
void bio_associate_blkg(struct bio *bio);
include/linux/bio.h
503
void bio_associate_blkg_from_css(struct bio *bio,
include/linux/bio.h
505
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
include/linux/bio.h
506
void blkcg_punt_bio_submit(struct bio *bio);
include/linux/bio.h
508
static inline void bio_associate_blkg(struct bio *bio) { }
include/linux/bio.h
509
static inline void bio_associate_blkg_from_css(struct bio *bio,
include/linux/bio.h
51
return bio->bi_flags & (1U << bit);
include/linux/bio.h
512
static inline void bio_clone_blkg_association(struct bio *dst,
include/linux/bio.h
513
struct bio *src) { }
include/linux/bio.h
514
static inline void blkcg_punt_bio_submit(struct bio *bio)
include/linux/bio.h
516
submit_bio(bio);
include/linux/bio.h
520
static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
include/linux/bio.h
522
bio_clear_flag(bio, BIO_REMAPPED);
include/linux/bio.h
523
if (bio->bi_bdev != bdev)
include/linux/bio.h
524
bio_clear_flag(bio, BIO_BPS_THROTTLED);
include/linux/bio.h
525
bio->bi_bdev = bdev;
include/linux/bio.h
526
bio_associate_blkg(bio);
include/linux/bio.h
537
struct bio *head;
include/linux/bio.h
538
struct bio *tail;
include/linux/bio.h
54
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
include/linux/bio.h
553
#define bio_list_for_each(bio, bl) \
include/linux/bio.h
554
for (bio = (bl)->head; bio; bio = bio->bi_next)
include/linux/bio.h
559
struct bio *bio;
include/linux/bio.h
56
bio->bi_flags |= (1U << bit);
include/linux/bio.h
561
bio_list_for_each(bio, bl)
include/linux/bio.h
567
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
include/linux/bio.h
569
bio->bi_next = NULL;
include/linux/bio.h
572
bl->tail->bi_next = bio;
include/linux/bio.h
574
bl->head = bio;
include/linux/bio.h
576
bl->tail = bio;
include/linux/bio.h
579
static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
include/linux/bio.h
581
bio->bi_next = bl->head;
include/linux/bio.h
583
bl->head = bio;
include/linux/bio.h
586
bl->tail = bio;
include/linux/bio.h
59
static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
include/linux/bio.h
61
bio->bi_flags &= ~(1U << bit);
include/linux/bio.h
623
static inline struct bio *bio_list_peek(struct bio_list *bl)
include/linux/bio.h
628
static inline struct bio *bio_list_pop(struct bio_list *bl)
include/linux/bio.h
630
struct bio *bio = bl->head;
include/linux/bio.h
632
if (bio) {
include/linux/bio.h
637
bio->bi_next = NULL;
include/linux/bio.h
640
return bio;
include/linux/bio.h
643
static inline struct bio *bio_list_get(struct bio_list *bl)
include/linux/bio.h
645
struct bio *bio = bl->head;
include/linux/bio.h
649
return bio;
include/linux/bio.h
656
static inline void bio_inc_remaining(struct bio *bio)
include/linux/bio.h
658
bio_set_flag(bio, BIO_CHAIN);
include/linux/bio.h
660
atomic_inc(&bio->__bi_remaining);
include/linux/bio.h
67
static inline bool bio_has_data(struct bio *bio)
include/linux/bio.h
69
if (bio &&
include/linux/bio.h
70
bio->bi_iter.bi_size &&
include/linux/bio.h
71
bio_op(bio) != REQ_OP_DISCARD &&
include/linux/bio.h
711
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
include/linux/bio.h
713
bio->bi_opf |= REQ_POLLED;
include/linux/bio.h
715
bio->bi_opf |= REQ_NOWAIT;
include/linux/bio.h
718
static inline void bio_clear_polled(struct bio *bio)
include/linux/bio.h
72
bio_op(bio) != REQ_OP_SECURE_ERASE &&
include/linux/bio.h
720
bio->bi_opf &= ~REQ_POLLED;
include/linux/bio.h
73
bio_op(bio) != REQ_OP_WRITE_ZEROES)
include/linux/bio.h
732
static inline bool bio_is_zone_append(struct bio *bio)
include/linux/bio.h
736
return bio_op(bio) == REQ_OP_ZONE_APPEND ||
include/linux/bio.h
737
bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
include/linux/bio.h
740
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
include/linux/bio.h
742
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
include/linux/bio.h
744
struct bio *blk_alloc_discard_bio(struct block_device *bdev,
include/linux/bio.h
79
static inline bool bio_no_advance_iter(const struct bio *bio)
include/linux/bio.h
81
return bio_op(bio) == REQ_OP_DISCARD ||
include/linux/bio.h
82
bio_op(bio) == REQ_OP_SECURE_ERASE ||
include/linux/bio.h
83
bio_op(bio) == REQ_OP_WRITE_ZEROES;
include/linux/bio.h
86
static inline void *bio_data(struct bio *bio)
include/linux/bio.h
88
if (bio_has_data(bio))
include/linux/bio.h
89
return page_address(bio_page(bio)) + bio_offset(bio);
include/linux/bio.h
94
static inline bool bio_next_segment(const struct bio *bio,
include/linux/bio.h
97
if (iter->idx >= bio->bi_vcnt)
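The iterator macros listed above walk a bio without mutating it: bio_for_each_segment() runs on a caller-declared bvec_iter initialized from a copy of bi_iter, so the bio's own position is untouched. A usage sketch; demo_bio_bytes is hypothetical (for data bios it simply recomputes bi_iter.bi_size):

#include <linux/bio.h>

static unsigned int demo_bio_bytes(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)	/* iterates a copy of bi_iter */
		bytes += bv.bv_len;
	return bytes;
}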
include/linux/blk-cgroup.h
19
struct bio;
include/linux/blk-cgroup.h
34
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
include/linux/blk-cgroup.h
42
static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
include/linux/blk-cgroup.h
49
char *blkcg_get_fc_appid(struct bio *bio);
include/linux/blk-crypto.h
130
static inline bool bio_has_crypt_ctx(struct bio *bio)
include/linux/blk-crypto.h
132
return bio->bi_crypt_context;
include/linux/blk-crypto.h
135
static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
include/linux/blk-crypto.h
137
return bio->bi_crypt_context;
include/linux/blk-crypto.h
140
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
include/linux/blk-crypto.h
172
static inline bool bio_has_crypt_ctx(struct bio *bio)
include/linux/blk-crypto.h
177
static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
include/linux/blk-crypto.h
184
bool __blk_crypto_submit_bio(struct bio *bio);
include/linux/blk-crypto.h
200
static inline void blk_crypto_submit_bio(struct bio *bio)
include/linux/blk-crypto.h
202
if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
include/linux/blk-crypto.h
203
submit_bio(bio);
include/linux/blk-crypto.h
206
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
include/linux/blk-crypto.h
218
static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
include/linux/blk-integrity.h
105
return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
include/linux/blk-integrity.h
106
rq->bio->bi_integrity->bip_iter);
include/linux/blk-integrity.h
115
struct bio *b)
include/linux/blk-integrity.h
36
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
include/linux/blk-mq-dma.h
10
struct bio *bio;
include/linux/blk-mq.h
1056
int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
include/linux/blk-mq.h
1075
int blk_rq_unmap_user(struct bio *);
include/linux/blk-mq.h
1078
int blk_rq_append_bio(struct request *rq, struct bio *bio);
include/linux/blk-mq.h
1085
struct bio *bio;
include/linux/blk-mq.h
1089
if ((rq->bio)) \
include/linux/blk-mq.h
1090
for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
include/linux/blk-mq.h
1093
__rq_for_each_bio(_iter.bio, _rq) \
include/linux/blk-mq.h
1094
bio_for_each_segment(bvl, _iter.bio, _iter.iter)
include/linux/blk-mq.h
1097
__rq_for_each_bio(_iter.bio, _rq) \
include/linux/blk-mq.h
1098
bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
include/linux/blk-mq.h
1101
(_iter.bio->bi_next == NULL && \
include/linux/blk-mq.h
1124
if (!rq->bio)
include/linux/blk-mq.h
1126
if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
include/linux/blk-mq.h
1127
return rq->bio->bi_iter.bi_size;
include/linux/blk-mq.h
1128
return bio_iovec(rq->bio).bv_len;
include/linux/blk-mq.h
1167
return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
include/linux/blk-mq.h
1173
struct bio *bio;
include/linux/blk-mq.h
1175
__rq_for_each_bio(bio, rq)
include/linux/blk-mq.h
122
struct bio *bio;
include/linux/blk-mq.h
123
struct bio *biotail;
include/linux/blk-mq.h
241
if (req->bio)
include/linux/blk-mq.h
242
return req->bio->bi_ioprio;
include/linux/blk_types.h
16
struct bio;
include/linux/blk_types.h
21
typedef void (bio_end_io_t) (struct bio *);
include/linux/blk_types.h
211
struct bio *bi_next; /* request queue link */
include/linux/blk_types.h
289
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
include/linux/blk_types.h
293
static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
include/linux/blk_types.h
295
return (struct bio_vec *)(bio + 1);
include/linux/blk_types.h
454
static inline enum req_op bio_op(const struct bio *bio)
include/linux/blk_types.h
456
return bio->bi_opf & REQ_OP_MASK;
include/linux/blkdev.h
1019
void submit_bio_noacct(struct bio *bio);
include/linux/blkdev.h
1020
struct bio *bio_split_to_limits(struct bio *bio);
include/linux/blkdev.h
1021
struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
include/linux/blkdev.h
1038
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
include/linux/blkdev.h
1050
static inline unsigned int bio_zone_no(struct bio *bio)
include/linux/blkdev.h
1052
return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
include/linux/blkdev.h
1055
static inline bool bio_straddles_zones(struct bio *bio)
include/linux/blkdev.h
1057
return bio_sectors(bio) &&
include/linux/blkdev.h
1058
bio_zone_no(bio) !=
include/linux/blkdev.h
1059
disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
include/linux/blkdev.h
1263
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
include/linux/blkdev.h
1272
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
include/linux/blkdev.h
1542
static inline sector_t bio_offset_from_zone_start(struct bio *bio)
include/linux/blkdev.h
1544
return bdev_offset_from_zone_start(bio->bi_bdev,
include/linux/blkdev.h
1545
bio->bi_iter.bi_sector);
include/linux/blkdev.h
1649
void (*submit_bio)(struct bio *bio);
include/linux/blkdev.h
1650
int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
include/linux/blkdev.h
1709
unsigned long bio_start_io_acct(struct bio *bio);
include/linux/blkdev.h
1710
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
include/linux/blkdev.h
1718
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
include/linux/blkdev.h
1720
return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
include/linux/blkdev.h
1873
static inline int bio_split_rw_at(struct bio *bio,
include/linux/blkdev.h
1877
return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
include/linux/blkdev.h
870
static inline bool bio_needs_zone_write_plugging(struct bio *bio)
include/linux/blkdev.h
872
enum req_op op = bio_op(bio);
include/linux/blkdev.h
878
if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
include/linux/blkdev.h
886
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
include/linux/blkdev.h
890
if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
include/linux/blkdev.h
910
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
include/linux/blkdev.h
948
static inline bool bio_needs_zone_write_plugging(struct bio *bio)
include/linux/blkdev.h
953
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
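The zone helpers above reduce to simple sector arithmetic: a bio straddles zones exactly when its first sector and its last sector (bio_end_sector() - 1) map to different zone numbers on the disk. A restatement of that check under a hypothetical name, matching the bio_straddles_zones() lines listed:

#include <linux/blkdev.h>

static bool demo_straddles_zones(struct bio *bio)
{
	if (!bio_sectors(bio))		/* empty bios span nothing */
		return false;
	return bio_zone_no(bio) !=
	       disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}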
include/linux/bsg-lib.h
60
struct bio *bidi_bio;
include/linux/ceph/messenger.h
133
struct bio *bio;
include/linux/ceph/messenger.h
144
bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \
include/linux/ceph/messenger.h
145
if (!(it)->iter.bi_size && (it)->bio->bi_next) { \
include/linux/ceph/messenger.h
147
(it)->bio = (it)->bio->bi_next; \
include/linux/ceph/messenger.h
148
(it)->iter = (it)->bio->bi_iter; \
include/linux/ceph/messenger.h
170
__bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
include/linux/device-mapper.h
433
void *dm_per_bio_data(struct bio *bio, size_t data_size);
include/linux/device-mapper.h
434
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
include/linux/device-mapper.h
435
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
include/linux/device-mapper.h
437
u64 dm_start_time_ns_from_clone(struct bio *bio);
include/linux/device-mapper.h
535
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
include/linux/device-mapper.h
536
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
include/linux/device-mapper.h
62
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
include/linux/device-mapper.h
79
struct bio *bio, blk_status_t *error);
include/linux/dm-io.h
46
struct bio *bio;
include/linux/dm-region-hash.h
53
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
include/linux/dm-region-hash.h
82
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
include/linux/dm-region-hash.h
84
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
include/linux/fs.h
3092
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
include/linux/fs.h
55
struct bio;
include/linux/fscrypt.h
452
bool fscrypt_decrypt_bio(struct bio *bio);
include/linux/fscrypt.h
753
static inline bool fscrypt_decrypt_bio(struct bio *bio)
include/linux/fscrypt.h
868
void fscrypt_set_bio_crypt_ctx(struct bio *bio,
include/linux/fscrypt.h
872
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
include/linux/fscrypt.h
876
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
include/linux/fscrypt.h
879
bool fscrypt_mergeable_bio_bh(struct bio *bio,
include/linux/fscrypt.h
893
static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
include/linux/fscrypt.h
898
struct bio *bio,
include/linux/fscrypt.h
902
static inline bool fscrypt_mergeable_bio(struct bio *bio,
include/linux/fscrypt.h
909
static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
include/linux/fsverity.h
202
void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio);
include/linux/fsverity.h
274
struct bio *bio)
include/linux/iomap.h
423
struct bio io_bio; /* MUST BE LAST! */
include/linux/iomap.h
426
static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
include/linux/iomap.h
428
return container_of(bio, struct iomap_ioend, io_bio);
include/linux/iomap.h
471
struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
include/linux/iomap.h
527
void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
include/linux/iomap.h
585
void iomap_dio_bio_end_io(struct bio *bio);
include/linux/libnvdimm.h
126
struct bio;
include/linux/libnvdimm.h
142
int (*flush)(struct nd_region *nd_region, struct bio *bio);
include/linux/libnvdimm.h
301
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
include/linux/swap.h
21
struct bio;
include/linux/writeback.h
16
struct bio;
include/linux/writeback.h
256
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
include/linux/writeback.h
265
bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
include/linux/writeback.h
289
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
include/trace/events/bcache.h
102
__entry->dev = bio_dev(bio);
include/trace/events/bcache.h
103
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/bcache.h
104
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
include/trace/events/bcache.h
105
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/bcache.h
11
TP_PROTO(struct bcache_device *d, struct bio *bio),
include/trace/events/bcache.h
114
TP_PROTO(struct bio *bio),
include/trace/events/bcache.h
115
TP_ARGS(bio)
include/trace/events/bcache.h
119
TP_PROTO(struct bio *bio),
include/trace/events/bcache.h
12
TP_ARGS(d, bio),
include/trace/events/bcache.h
120
TP_ARGS(bio)
include/trace/events/bcache.h
124
TP_PROTO(struct bio *bio, bool hit, bool bypass),
include/trace/events/bcache.h
125
TP_ARGS(bio, hit, bypass),
include/trace/events/bcache.h
137
__entry->dev = bio_dev(bio);
include/trace/events/bcache.h
138
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/bcache.h
139
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
include/trace/events/bcache.h
140
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/bcache.h
152
TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
include/trace/events/bcache.h
154
TP_ARGS(c, inode, bio, writeback, bypass),
include/trace/events/bcache.h
169
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/bcache.h
170
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
include/trace/events/bcache.h
171
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/bcache.h
183
TP_PROTO(struct bio *bio),
include/trace/events/bcache.h
184
TP_ARGS(bio)
include/trace/events/bcache.h
225
TP_PROTO(struct bio *bio, u32 keys),
include/trace/events/bcache.h
226
TP_ARGS(bio, keys),
include/trace/events/bcache.h
237
__entry->dev = bio_dev(bio);
include/trace/events/bcache.h
238
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/bcache.h
239
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
include/trace/events/bcache.h
241
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/bcache.h
25
__entry->dev = bio_dev(bio);
include/trace/events/bcache.h
28
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/bcache.h
29
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
include/trace/events/bcache.h
30
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
include/trace/events/bcache.h
31
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/bcache.h
81
TP_PROTO(struct bcache_device *d, struct bio *bio),
include/trace/events/bcache.h
82
TP_ARGS(d, bio)
include/trace/events/bcache.h
86
TP_PROTO(struct bcache_device *d, struct bio *bio),
include/trace/events/bcache.h
87
TP_ARGS(d, bio)
include/trace/events/bcache.h
91
TP_PROTO(struct bio *bio),
include/trace/events/bcache.h
92
TP_ARGS(bio),
include/trace/events/block.h
309
TP_PROTO(struct request_queue *q, struct bio *bio),
include/trace/events/block.h
311
TP_ARGS(q, bio),
include/trace/events/block.h
322
__entry->dev = bio_dev(bio);
include/trace/events/block.h
323
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/block.h
324
__entry->nr_sector = bio_sectors(bio);
include/trace/events/block.h
325
__entry->error = blk_status_to_errno(bio->bi_status);
include/trace/events/block.h
326
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/block.h
337
TP_PROTO(struct bio *bio),
include/trace/events/block.h
339
TP_ARGS(bio),
include/trace/events/block.h
350
__entry->dev = bio_dev(bio);
include/trace/events/block.h
351
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/block.h
352
__entry->nr_sector = bio_sectors(bio);
include/trace/events/block.h
353
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/block.h
370
TP_PROTO(struct bio *bio),
include/trace/events/block.h
371
TP_ARGS(bio)
include/trace/events/block.h
381
TP_PROTO(struct bio *bio),
include/trace/events/block.h
382
TP_ARGS(bio)
include/trace/events/block.h
392
TP_PROTO(struct bio *bio),
include/trace/events/block.h
393
TP_ARGS(bio)
include/trace/events/block.h
403
TP_PROTO(struct bio *bio),
include/trace/events/block.h
404
TP_ARGS(bio)
include/trace/events/block.h
490
TP_PROTO(struct bio *bio, unsigned int new_sector),
include/trace/events/block.h
492
TP_ARGS(bio, new_sector),
include/trace/events/block.h
503
__entry->dev = bio_dev(bio);
include/trace/events/block.h
504
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/block.h
506
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/block.h
528
TP_PROTO(struct bio *bio, dev_t dev, sector_t from),
include/trace/events/block.h
530
TP_ARGS(bio, dev, from),
include/trace/events/block.h
542
__entry->dev = bio_dev(bio);
include/trace/events/block.h
543
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/block.h
544
__entry->nr_sector = bio_sectors(bio);
include/trace/events/block.h
547
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/block.h
613
TP_PROTO(struct bio *bio, sector_t nr_sectors),
include/trace/events/block.h
615
TP_ARGS(bio, nr_sectors),
include/trace/events/block.h
625
__entry->dev = bio_dev(bio);
include/trace/events/block.h
626
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/block.h
627
__entry->nr_sectors = bio_sectors(bio);
include/trace/events/block.h
628
blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
include/trace/events/btrfs.h
2393
const struct bio *bio,
include/trace/events/btrfs.h
2396
TP_ARGS(rbio, bio, trace_info),
include/trace/events/btrfs.h
2413
__entry->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
include/trace/events/btrfs.h
2414
__entry->len = bio->bi_iter.bi_size;
include/trace/events/btrfs.h
2415
__entry->opf = bio_op(bio);
include/trace/events/btrfs.h
2443
const struct bio *bio,
include/trace/events/btrfs.h
2446
TP_ARGS(rbio, bio, trace_info)
include/trace/events/btrfs.h
2451
const struct bio *bio,
include/trace/events/btrfs.h
2454
TP_ARGS(rbio, bio, trace_info)
include/trace/events/f2fs.h
1233
TP_PROTO(struct super_block *sb, int type, struct bio *bio),
include/trace/events/f2fs.h
1235
TP_ARGS(sb, type, bio),
include/trace/events/f2fs.h
1249
__entry->target = bio_dev(bio);
include/trace/events/f2fs.h
1250
__entry->op = bio_op(bio);
include/trace/events/f2fs.h
1251
__entry->op_flags = bio->bi_opf;
include/trace/events/f2fs.h
1253
__entry->sector = bio->bi_iter.bi_sector;
include/trace/events/f2fs.h
1254
__entry->size = bio->bi_iter.bi_size;
include/trace/events/f2fs.h
1268
TP_PROTO(struct super_block *sb, int type, struct bio *bio),
include/trace/events/f2fs.h
1270
TP_ARGS(sb, type, bio),
include/trace/events/f2fs.h
1272
TP_CONDITION(bio)
include/trace/events/f2fs.h
1277
TP_PROTO(struct super_block *sb, int type, struct bio *bio),
include/trace/events/f2fs.h
1279
TP_ARGS(sb, type, bio),
include/trace/events/f2fs.h
1281
TP_CONDITION(bio)
include/trace/events/f2fs.h
1286
TP_PROTO(struct super_block *sb, int type, struct bio *bio),
include/trace/events/f2fs.h
1288
TP_ARGS(sb, type, bio),
include/trace/events/f2fs.h
1290
TP_CONDITION(bio)
include/trace/events/f2fs.h
1295
TP_PROTO(struct super_block *sb, int type, struct bio *bio),
include/trace/events/f2fs.h
1297
TP_ARGS(sb, type, bio),
include/trace/events/f2fs.h
1299
TP_CONDITION(bio)
kernel/power/swap.c
241
static void hib_end_io(struct bio *bio)
kernel/power/swap.c
243
struct hib_bio_batch *hb = bio->bi_private;
kernel/power/swap.c
244
struct page *page = bio_first_page_all(bio);
kernel/power/swap.c
246
if (bio->bi_status) {
kernel/power/swap.c
248
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
kernel/power/swap.c
249
(unsigned long long)bio->bi_iter.bi_sector);
kernel/power/swap.c
252
if (bio_data_dir(bio) == WRITE)
kernel/power/swap.c
258
if (bio->bi_status && !hb->error)
kernel/power/swap.c
259
hb->error = bio->bi_status;
kernel/power/swap.c
263
bio_put(bio);
kernel/power/swap.c
275
struct bio *bio;
kernel/power/swap.c
277
bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,
kernel/power/swap.c
279
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
kernel/power/swap.c
280
bio_add_virt_nofail(bio, addr, PAGE_SIZE);
kernel/power/swap.c
281
bio->bi_end_io = hib_end_io;
kernel/power/swap.c
282
bio->bi_private = hb;
kernel/power/swap.c
284
submit_bio(bio);
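The kernel/power/swap.c entries show single-page I/O addressed by page offset: the sector is page_off * (PAGE_SIZE >> 9), and the payload is attached by kernel virtual address with bio_add_virt_nofail() (which requires a directly mapped, non-vmalloc address). A sketch; demo_submit_page is hypothetical and the GFP flags are an assumption:

#include <linux/bio.h>

static void demo_submit_page(struct block_device *bdev, blk_opf_t opf,
			     pgoff_t page_off, void *addr,
			     bio_end_io_t *end_io, void *private)
{
	struct bio *bio = bio_alloc(bdev, 1, opf, GFP_NOIO);

	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio_add_virt_nofail(bio, addr, PAGE_SIZE);
	bio->bi_end_io = end_io;	/* handler must bio_put() */
	bio->bi_private = private;
	submit_bio(bio);
}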
kernel/trace/blktrace.c
1002
return blk_trace_bio_get_cgid(rq->q, rq->bio);
kernel/trace/blktrace.c
1101
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
kernel/trace/blktrace.c
1113
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
kernel/trace/blktrace.c
1114
bio->bi_opf, what, error, 0, NULL,
kernel/trace/blktrace.c
1115
blk_trace_bio_get_cgid(q, bio));
kernel/trace/blktrace.c
1120
struct request_queue *q, struct bio *bio)
kernel/trace/blktrace.c
1122
blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
kernel/trace/blktrace.c
1123
blk_status_to_errno(bio->bi_status));
kernel/trace/blktrace.c
1126
static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
kernel/trace/blktrace.c
1128
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
kernel/trace/blktrace.c
1132
static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
kernel/trace/blktrace.c
1134
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
kernel/trace/blktrace.c
1138
static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
kernel/trace/blktrace.c
1140
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
kernel/trace/blktrace.c
1143
static void blk_add_trace_getrq(void *ignore, struct bio *bio)
kernel/trace/blktrace.c
1145
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
kernel/trace/blktrace.c
1211
static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
kernel/trace/blktrace.c
1213
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
kernel/trace/blktrace.c
1221
__blk_add_trace(bt, bio->bi_iter.bi_sector,
kernel/trace/blktrace.c
1222
bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
kernel/trace/blktrace.c
1223
blk_status_to_errno(bio->bi_status),
kernel/trace/blktrace.c
1225
blk_trace_bio_get_cgid(q, bio));
kernel/trace/blktrace.c
1239
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
kernel/trace/blktrace.c
1242
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
kernel/trace/blktrace.c
1254
r.device_to = cpu_to_be32(bio_dev(bio));
kernel/trace/blktrace.c
1257
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
kernel/trace/blktrace.c
1258
bio->bi_opf, BLK_TA_REMAP,
kernel/trace/blktrace.c
1259
blk_status_to_errno(bio->bi_status),
kernel/trace/blktrace.c
1260
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
kernel/trace/blktrace.c
974
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
kernel/trace/blktrace.c
984
blkcg_css = bio_blkcg_css(bio);
kernel/trace/blktrace.c
990
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
kernel/trace/blktrace.c
999
if (!rq->bio)
mm/page_io.c
30
static void __end_swap_bio_write(struct bio *bio)
mm/page_io.c
305
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
mm/page_io.c
316
bio_associate_blkg_from_css(bio, css);
mm/page_io.c
32
struct folio *folio = bio_first_folio_all(bio);
mm/page_io.c
320
#define bio_associate_blkg_from_page(bio, folio) do { } while (0)
mm/page_io.c
34
if (bio->bi_status) {
mm/page_io.c
414
struct bio bio;
mm/page_io.c
416
bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
mm/page_io.c
417
bio.bi_iter.bi_sector = swap_folio_sector(folio);
mm/page_io.c
418
bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
mm/page_io.c
420
bio_associate_blkg_from_page(&bio, folio);
mm/page_io.c
426
submit_bio_wait(&bio);
mm/page_io.c
427
__end_swap_bio_write(&bio);
mm/page_io.c
433
struct bio *bio;
mm/page_io.c
435
bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);
mm/page_io.c
436
bio->bi_iter.bi_sector = swap_folio_sector(folio);
mm/page_io.c
437
bio->bi_end_io = end_swap_bio_write;
mm/page_io.c
438
bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
mm/page_io.c
440
bio_associate_blkg_from_page(bio, folio);
mm/page_io.c
444
submit_bio(bio);
mm/page_io.c
45
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
mm/page_io.c
46
(unsigned long long)bio->bi_iter.bi_sector);
mm/page_io.c
52
static void end_swap_bio_write(struct bio *bio)
mm/page_io.c
54
__end_swap_bio_write(bio);
mm/page_io.c
55
bio_put(bio);
mm/page_io.c
576
struct bio bio;
mm/page_io.c
578
bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
mm/page_io.c
579
bio.bi_iter.bi_sector = swap_folio_sector(folio);
mm/page_io.c
58
static void __end_swap_bio_read(struct bio *bio)
mm/page_io.c
580
bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
mm/page_io.c
589
submit_bio_wait(&bio);
mm/page_io.c
590
__end_swap_bio_read(&bio);
mm/page_io.c
597
struct bio *bio;
mm/page_io.c
599
bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
mm/page_io.c
60
struct folio *folio = bio_first_folio_all(bio);
mm/page_io.c
600
bio->bi_iter.bi_sector = swap_folio_sector(folio);
mm/page_io.c
601
bio->bi_end_io = end_swap_bio_read;
mm/page_io.c
602
bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
mm/page_io.c
606
submit_bio(bio);
mm/page_io.c
62
if (bio->bi_status) {
mm/page_io.c
64
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
mm/page_io.c
65
(unsigned long long)bio->bi_iter.bi_sector);
mm/page_io.c
72
static void end_swap_bio_read(struct bio *bio)
mm/page_io.c
74
__end_swap_bio_read(bio);
mm/page_io.c
75
bio_put(bio);
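mm/page_io.c pairs each asynchronous path with a fully synchronous one built from an on-stack bio and a single on-stack bio_vec: bio_init(), add the folio, submit_bio_wait(), then run the completion logic inline — no bi_end_io of its own and no bio_put(). A sketch; demo_swap_write_sync is hypothetical and the post-I/O bookkeeping is elided:

#include <linux/bio.h>

static void demo_swap_write_sync(struct block_device *bdev,
				 struct folio *folio, sector_t sector)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
	bio.bi_iter.bi_sector = sector;
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	submit_bio_wait(&bio);	/* returns once the write completes */
	/* inline completion handling (check bio.bi_status) goes here */
}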
net/ceph/messenger.c
736
BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
net/ceph/messenger.c
743
struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
net/ceph/messenger.c
755
struct page *page = bio_iter_page(it->bio, it->iter);
net/ceph/messenger.c
758
BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
net/ceph/messenger.c
760
bio_advance_iter(it->bio, &it->iter, bytes);
net/ceph/messenger.c
766
page == bio_iter_page(it->bio, it->iter)))
net/ceph/messenger.c
770
it->bio = it->bio->bi_next;
net/ceph/messenger.c
771
it->iter = it->bio->bi_iter;
net/ceph/messenger.c
776
BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
tools/testing/selftests/sgx/sigstruct.c
164
BIO *bio;
tools/testing/selftests/sgx/sigstruct.c
170
bio = BIO_new_mem_buf(&sign_key, sign_key_length);
tools/testing/selftests/sgx/sigstruct.c
171
if (!bio)
tools/testing/selftests/sgx/sigstruct.c
174
key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, NULL);
tools/testing/selftests/sgx/sigstruct.c
175
BIO_free(bio);
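Note: the tools/testing/selftests/sgx/sigstruct.c entries match only on the variable name — bio there is OpenSSL's userspace BIO stream (BIO_new_mem_buf(), PEM_read_bio_RSAPrivateKey(), BIO_free()), unrelated to the kernel's block-layer struct bio.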