block/fops.c
  133: struct blkdev_dio *dio = bio->bi_private;
  134: bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
  135: bool is_sync = dio->flags & DIO_IS_SYNC;
  137: if (bio->bi_status && !dio->bio.bi_status)
  138: dio->bio.bi_status = bio->bi_status;
  143: if (atomic_dec_and_test(&dio->ref)) {
  145: struct kiocb *iocb = dio->iocb;
  150: if (likely(!dio->bio.bi_status)) {
  151: ret = dio->size;
  154: ret = blk_status_to_errno(dio->bio.bi_status);
  157: dio->iocb->ki_complete(iocb, ret);
  158: bio_put(&dio->bio);
  160: struct task_struct *waiter = dio->waiter;
  162: WRITE_ONCE(dio->waiter, NULL);
  179: struct blkdev_dio *dio;
  188: dio = container_of(bio, struct blkdev_dio, bio);
  189: atomic_set(&dio->ref, 1);
  198: dio->flags = DIO_IS_SYNC;
  199: dio->waiter = current;
  201: dio->flags = 0;
  202: dio->iocb = iocb;
  205: dio->size = 0;
  207: dio->flags |= DIO_SHOULD_DIRTY;
  215: bio->bi_private = dio;
  247: if (dio->flags & DIO_SHOULD_DIRTY)
  252: dio->size += bio->bi_iter.bi_size;
  260: atomic_inc(&dio->ref);
  272: if (!READ_ONCE(dio->waiter))
  279: ret = blk_status_to_errno(dio->bio.bi_status);
  281: ret = dio->size;
  283: bio_put(&dio->bio);
  295: struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
  296: struct kiocb *iocb = dio->iocb;
  302: ret = dio->size;
  313: if (dio->flags & DIO_SHOULD_DIRTY) {
  328: struct blkdev_dio *dio;
  335: dio = container_of(bio, struct blkdev_dio, bio);
  336: dio->flags = 0;
  337: dio->iocb = iocb;
  357: dio->size = bio->bi_iter.bi_size;
  361: dio->flags |= DIO_SHOULD_DIRTY;

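Taken together, the block/fops.c hits are one refcounted completion scheme: every submitted bio holds a reference on the containing blkdev_dio, and whichever bio drops the last reference finishes the request, either waking a synchronous waiter or firing ->ki_complete(). A reassembled sketch; the function name and the wakeup call do not appear in the hits and are assumptions:

```c
/*
 * Sketch of the completion pattern visible above, not the kernel's
 * exact code: record the first error, then let the final reference
 * holder finish the request.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;

	/* First error wins; later bios must not overwrite it. */
	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			/* Async: report bytes transferred or an errno. */
			struct kiocb *iocb = dio->iocb;
			ssize_t ret = dio->bio.bi_status ?
				blk_status_to_errno(dio->bio.bi_status) :
				dio->size;

			iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			/* Sync: hand off to the sleeping submitter. */
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			wake_up_process(waiter); /* stand-in for the kernel's wakeup helper */
		}
	}
}
```
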
drivers/block/loop.c
  702: int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
  704: return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
  712: LOOP_ATTR_RO(dio);

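The loop.c hits are the standard read-only sysfs attribute shape. A sketch; the show-helper signature matches what loop.c's LOOP_ATTR_RO() macro expects, though only the two body lines appear in the hits:

```c
/* Read-only sysfs attribute reporting whether direct I/O is enabled. */
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	/* sysfs_emit() bounds the output to the one-page sysfs buffer. */
	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}
LOOP_ATTR_RO(dio);	/* expands to the device_attribute boilerplate */
```
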
drivers/comedi/drivers/ni_670x.c
  68: int dio;

drivers/comedi/drivers/ni_pcidio.c
  286: int dio;

drivers/gpu/drm/amd/display/dc/dc.h
  830: bool dio : 1; /* Display output */

drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.c
  19: static void dcn10_dio_mem_pwr_ctrl(struct dio *dio, bool enable_i2c_light_sleep)
  21: struct dcn10_dio *dio10 = TO_DCN10_DIO(dio);

drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.h
  29: struct dio base;

drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
  1885: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  1886: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
  365: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  366: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
  799: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  800: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
  242: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  243: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, dc->debug.enable_mem_low_power.bits.i2c);

drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
  961: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  962: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
  277: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  278: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, !dc->debug.enable_mem_low_power.bits.i2c);

drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
  329: if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
  330: dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

drivers/gpu/drm/amd/display/dc/inc/core_types.h
  254: struct dio *dio;

drivers/gpu/drm/amd/display/dc/inc/hw/dio.h
  11: struct dio;
  14: void (*mem_pwr_ctrl)(struct dio *dio, bool enable_i2c_light_sleep);

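inc/hw/dio.h declares the hook table that every hwseq hit above dispatches through, always guarded by a double NULL check. A condensed sketch; the ops-struct name and the surrounding plumbing are assumptions inferred from the call sites:

```c
struct dio;

/* Hook table; name assumed, only the member the hits show. */
struct dio_funcs {
	void (*mem_pwr_ctrl)(struct dio *dio, bool enable_i2c_light_sleep);
};

struct dio {
	const struct dio_funcs *funcs;
};

/* Call-site pattern repeated across the dcn10..dcn401 hwseq files:
 * both the object and the hook may be absent on a given generation. */
if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
	dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);
```
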
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
  463: static struct dio *dcn10_dio_create(struct dc_context *ctx)
  948: if (pool->base.dio != NULL) {
  949: kfree(TO_DCN10_DIO(pool->base.dio));
  950: pool->base.dio = NULL;
  1690: pool->base.dio = dcn10_dio_create(ctx);
  1691: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
  569: static struct dio *dcn20_dio_create(struct dc_context *ctx)
  1136: if (pool->base.dio != NULL) {
  1137: kfree(TO_DCN10_DIO(pool->base.dio));
  1138: pool->base.dio = NULL;
  2730: pool->base.dio = dcn20_dio_create(ctx);
  2731: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
  774: static struct dio *dcn201_dio_create(struct dc_context *ctx)
  961: if (pool->base.dio != NULL) {
  962: kfree(TO_DCN10_DIO(pool->base.dio));
  963: pool->base.dio = NULL;
  1313: pool->base.dio = dcn201_dio_create(ctx);
  1314: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
  341: static struct dio *dcn21_dio_create(struct dc_context *ctx)
  701: if (pool->base.dio != NULL) {
  702: kfree(TO_DCN10_DIO(pool->base.dio));
  703: pool->base.dio = NULL;
  1684: pool->base.dio = dcn21_dio_create(ctx);
  1685: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
  905: static struct dio *dcn30_dio_create(struct dc_context *ctx)
  1127: if (pool->base.dio != NULL) {
  1128: kfree(TO_DCN10_DIO(pool->base.dio));
  1129: pool->base.dio = NULL;
  2502: pool->base.dio = dcn30_dio_create(ctx);
  2503: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
  862: static struct dio *dcn301_dio_create(struct dc_context *ctx)
  1098: if (pool->base.dio != NULL) {
  1099: kfree(TO_DCN10_DIO(pool->base.dio));
  1100: pool->base.dio = NULL;
  1620: pool->base.dio = dcn301_dio_create(ctx);
  1621: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
  272: static struct dio *dcn302_dio_create(struct dc_context *ctx)
  1053: if (pool->dio != NULL) {
  1054: kfree(TO_DCN10_DIO(pool->dio));
  1055: pool->dio = NULL;
  1409: pool->dio = dcn302_dio_create(ctx);
  1410: if (pool->dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
  268: static struct dio *dcn303_dio_create(struct dc_context *ctx)
  997: if (pool->dio != NULL) {
  998: kfree(TO_DCN10_DIO(pool->dio));
  999: pool->dio = NULL;
  1341: pool->dio = dcn303_dio_create(ctx);
  1342: if (pool->dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
  1040: static struct dio *dcn31_dio_create(struct dc_context *ctx)
  1427: if (pool->base.dio != NULL) {
  1428: kfree(TO_DCN10_DIO(pool->base.dio));
  1429: pool->base.dio = NULL;
  2099: pool->base.dio = dcn31_dio_create(ctx);
  2100: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
  1098: static struct dio *dcn314_dio_create(struct dc_context *ctx)
  1486: if (pool->base.dio != NULL) {
  1487: kfree(TO_DCN10_DIO(pool->base.dio));
  1488: pool->base.dio = NULL;
  2023: pool->base.dio = dcn314_dio_create(ctx);
  2024: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
  1039: static struct dio *dcn315_dio_create(struct dc_context *ctx)
  1428: if (pool->base.dio != NULL) {
  1429: kfree(TO_DCN10_DIO(pool->base.dio));
  1430: pool->base.dio = NULL;
  2048: pool->base.dio = dcn315_dio_create(ctx);
  2049: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
  1032: static struct dio *dcn316_dio_create(struct dc_context *ctx)
  1423: if (pool->base.dio != NULL) {
  1424: kfree(TO_DCN10_DIO(pool->base.dio));
  1425: pool->base.dio = NULL;
  1923: pool->base.dio = dcn316_dio_create(ctx);
  1924: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
  850: static struct dio *dcn32_dio_create(struct dc_context *ctx)
  1527: if (pool->base.dio != NULL) {
  1528: kfree(TO_DCN10_DIO(pool->base.dio));
  1529: pool->base.dio = NULL;
  2412: pool->base.dio = dcn32_dio_create(ctx);
  2413: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
  844: static struct dio *dcn321_dio_create(struct dc_context *ctx)
  1507: if (pool->base.dio != NULL) {
  1508: kfree(TO_DCN10_DIO(pool->base.dio));
  1509: pool->base.dio = NULL;
  1911: pool->base.dio = dcn321_dio_create(ctx);
  1912: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
  990: static struct dio *dcn35_dio_create(struct dc_context *ctx)
  1597: if (pool->base.dio != NULL) {
  1598: kfree(TO_DCN10_DIO(pool->base.dio));
  1599: pool->base.dio = NULL;
  2072: pool->base.dio = dcn35_dio_create(ctx);
  2073: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
  970: static struct dio *dcn351_dio_create(struct dc_context *ctx)
  1577: if (pool->base.dio != NULL) {
  1578: kfree(TO_DCN10_DIO(pool->base.dio));
  1579: pool->base.dio = NULL;
  2044: pool->base.dio = dcn351_dio_create(ctx);
  2045: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
  977: static struct dio *dcn36_dio_create(struct dc_context *ctx)
  1584: if (pool->base.dio != NULL) {
  1585: kfree(TO_DCN10_DIO(pool->base.dio));
  1586: pool->base.dio = NULL;
  2051: pool->base.dio = dcn36_dio_create(ctx);
  2052: if (pool->base.dio == NULL) {

drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
  900: static struct dio *dcn401_dio_create(struct dc_context *ctx)
  1533: if (pool->base.dio != NULL) {
  1534: kfree(TO_DCN10_DIO(pool->base.dio));
  1535: pool->base.dio = NULL;
  2109: pool->base.dio = dcn401_dio_create(ctx);
  2110: if (pool->base.dio == NULL) {

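Every dcnXX_resource.c above repeats the same three-step lifecycle for pool->dio (pool->base.dio in most variants): a per-generation _dio_create() constructor, a NULL check at pool construction, and a kfree() through TO_DCN10_DIO() plus NULL-out at destruction. A sketch of that lifecycle; dcnxx_dio_create() stands in for any of the listed generations and the unwind label is assumed:

```c
/* Construction: allocate the dio block, bail out on failure. */
pool->base.dio = dcnxx_dio_create(ctx);
if (pool->base.dio == NULL)
	goto create_fail;	/* each resource file has its own unwind path */

/* Destruction: every generation downcasts via TO_DCN10_DIO(), because
 * struct dcn10_dio embeds struct dio as its 'base' member (see
 * dcn10_dio.h:29 above), so the dcn10 container is what was allocated. */
if (pool->base.dio != NULL) {
	kfree(TO_DCN10_DIO(pool->base.dio));
	pool->base.dio = NULL;
}
```
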
drivers/md/dm-integrity.c
  375: static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
  376: static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map);
  1566: static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
  1572: bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  1598: static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
  1600: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  1602: if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
  1603: submit_flush_bio(ic, dio);
  1608: static void dec_in_flight(struct dm_integrity_io *dio)
  1610: if (atomic_dec_and_test(&dio->in_flight)) {
  1611: struct dm_integrity_c *ic = dio->ic;
  1614: remove_range(ic, &dio->range);
  1616: if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
  1619: bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  1620: if (unlikely(dio->bi_status) && !bio->bi_status)
  1621: bio->bi_status = dio->bi_status;
  1622: if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
  1623: dio->range.logical_sector += dio->range.n_sectors;
  1624: bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
  1625: INIT_WORK(&dio->work, integrity_bio_wait);
  1626: queue_work(ic->offload_wq, &dio->work);
  1629: do_endio_flush(ic, dio);
  1635: struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
  1637: dm_bio_restore(&dio->bio_details, bio);
  1641: if (dio->completion)
  1642: complete(dio->completion);
  1644: dec_in_flight(dio);
  1798: static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
  1800: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  1801: struct dm_integrity_c *ic = dio->ic;
  1807: get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
  1808: dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
  1809: &dio->metadata_offset);
  1811: logical_sector = dio->range.logical_sector;
  1815: __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
  1836: alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
  1845: dio->bi_status = errno_to_blk_status(r);
  1849: integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, integrity_identity(ic, buffer), buffer_offset, checksum);
  1850: r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
  1851: &dio->metadata_offset, ic->tag_size, TAG_CMP);
  1861: dio->bi_status = errno_to_blk_status(r);
  1880: struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
  1881: struct dm_integrity_c *ic = dio->ic;
  1889: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  1899: if (likely(dio->op != REQ_OP_DISCARD))
  1913: if (unlikely(dio->op == REQ_OP_DISCARD)) {
  1914: unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
  1924: r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
  1940: sector = dio->range.logical_sector;
  1941: sectors_to_process = dio->range.n_sectors;
  1943: __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
  1953: integrity_sector_checksum(ic, &dio->ahash_req, sector, mem, bv_copy.bv_offset + pos, checksums_ptr);
  1961: r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
  1962: checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
  1967: integrity_recheck(dio, checksums_onstack);
  1986: struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
  1991: unsigned int data_to_process = dio->range.n_sectors;
  2003: r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
  2004: this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
  2014: dec_in_flight(dio);
  2017: dio->bi_status = errno_to_blk_status(r);
  2018: dec_in_flight(dio);
  2053: struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
  2058: dio->ic = ic;
  2059: dio->bi_status = 0;
  2060: dio->op = bio_op(bio);
  2061: dio->ahash_req = NULL;
  2065: dio->integrity_payload = NULL;
  2066: dio->integrity_payload_from_mempool = false;
  2067: dio->integrity_range_locked = false;
  2068: return dm_integrity_map_inline(dio, true);
  2071: if (unlikely(dio->op == REQ_OP_DISCARD)) {
  2087: submit_flush_bio(ic, dio);
  2091: dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
  2092: dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
  2093: if (unlikely(dio->fua)) {
  2100: if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio)))
  2125: if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
  2128: get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
  2129: dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
  2132: dm_integrity_map_continue(dio, true);
  2136: static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
  2139: struct dm_integrity_c *ic = dio->ic;
  2143: logical_sector = dio->range.logical_sector;
  2144: n_sectors = dio->range.n_sectors;
  2155: if (likely(dio->op == REQ_OP_WRITE))
  2161: if (unlikely(dio->op == REQ_OP_READ)) {
  2199: if (likely(dio->op == REQ_OP_WRITE))
  2207: } else if (likely(dio->op == REQ_OP_WRITE))
  2211: if (likely(dio->op == REQ_OP_WRITE)) {
  2231: integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, checksums_onstack);
  2234: integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, journal_entry_tag(ic, je));
  2251: if (unlikely(dio->op == REQ_OP_READ))
  2256: if (likely(dio->op == REQ_OP_WRITE)) {
  2265: remove_range(ic, &dio->range);
  2270: dio->range.logical_sector = logical_sector;
  2271: get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
  2272: dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
  2279: static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
  2281: struct dm_integrity_c *ic = dio->ic;
  2282: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  2288: bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
  2290: if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
  2294: INIT_WORK(&dio->work, integrity_bio_wait);
  2295: queue_work(ic->offload_wq, &dio->work);
  2307: dio->range.n_sectors = bio_sectors(bio);
  2309: if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
  2310: if (dio->op == REQ_OP_WRITE) {
  2314: dio->range.n_sectors = min(dio->range.n_sectors,
  2316: if (unlikely(!dio->range.n_sectors)) {
  2322: range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
  2340: add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
  2354: } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
  2361: journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
  2363: if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
  2364: dio->range.n_sectors = next_sector - dio->range.logical_sector;
  2369: for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
  2370: if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
  2373: dio->range.n_sectors = i;
  2377: if (unlikely(!add_new_range(ic, &dio->range, true))) {
  2386: INIT_WORK(&dio->work, integrity_bio_wait);
  2387: queue_work(ic->wait_wq, &dio->work);
  2391: dio->range.n_sectors = ic->sectors_per_block;
  2392: wait_and_add_new_range(ic, &dio->range);
  2402: new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
  2404: remove_range_unlocked(ic, &dio->range);
  2409: if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
  2413: new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
  2415: unlikely(next_sector < dio->range.logical_sector + dio->range.n_sectors)) {
  2416: remove_range_unlocked(ic, &dio->range);
  2435: if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
  2436: if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
  2437: dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
  2440: bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
  2449: dio->in_flight = (atomic_t)ATOMIC_INIT(2);
  2453: dio->completion = &read_comp;
  2455: dio->completion = NULL;
  2457: dm_bio_record(&dio->bio_details, bio);
  2462: bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
  2464: if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
  2465: integrity_metadata(&dio->work);
  2468: dio->in_flight = (atomic_t)ATOMIC_INIT(1);
  2469: dio->completion = NULL;
  2481: dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
  2484: if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
  2485: dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
  2490: integrity_metadata(&dio->work);
  2493: dec_in_flight(dio);
  2495: INIT_WORK(&dio->work, integrity_metadata);
  2496: queue_work(ic->metadata_wq, &dio->work);
  2502: if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
  2505: do_endio_flush(ic, dio);
  2508: static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map)
  2510: struct dm_integrity_c *ic = dio->ic;
  2511: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  2527: if (!dio->integrity_payload) {
  2529: dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
  2532: dio->payload_len += extra_size;
  2533: dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
  2534: if (unlikely(!dio->integrity_payload)) {
  2536: if (dio->payload_len > x_size) {
  2549: dio->range.logical_sector = bio->bi_iter.bi_sector;
  2550: dio->range.n_sectors = bio_sectors(bio);
  2562: if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector))
  2567: if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)
  2569: if (unlikely(!add_new_range(ic, &dio->range, true))) {
  2572: INIT_WORK(&dio->work, integrity_bio_wait);
  2573: queue_work(ic->wait_wq, &dio->work);
  2576: wait_and_add_new_range(ic, &dio->range);
  2578: dio->integrity_range_locked = true;
  2583: if (unlikely(!dio->integrity_payload)) {
  2584: dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
  2585: dio->integrity_payload_from_mempool = true;
  2588: dio->bio_details.bi_iter = bio->bi_iter;
  2603: if (dio->op == REQ_OP_WRITE) {
  2605: while (dio->bio_details.bi_iter.bi_size) {
  2606: struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
  2609: memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size);
  2610: integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, dio->integrity_payload + pos);
  2613: bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
  2617: ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload),
  2618: dio->payload_len, offset_in_page(dio->integrity_payload));
  2619: if (unlikely(ret != dio->payload_len)) {
  2628: static inline void dm_integrity_free_payload(struct dm_integrity_io *dio)
  2630: struct dm_integrity_c *ic = dio->ic;
  2631: if (unlikely(dio->integrity_payload_from_mempool))
  2632: mempool_free(virt_to_page(dio->integrity_payload), &ic->recheck_pool);
  2634: kfree(dio->integrity_payload);
  2635: dio->integrity_payload = NULL;
  2636: dio->integrity_payload_from_mempool = false;
  2641: struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
  2642: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  2643: struct dm_integrity_c *ic = dio->ic;
  2647: dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
  2648: dio->integrity_payload_from_mempool = true;
  2650: outgoing_data = dio->integrity_payload + PAGE_SIZE;
  2652: while (dio->bio_details.bi_iter.bi_size) {
  2671: r = bio_integrity_add_page(outgoing_bio, virt_to_page(dio->integrity_payload), ic->tuple_size, 0);
  2679: outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS;
  2690: integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, integrity_identity(ic, outgoing_data), 0, digest);
  2691: if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(ic->internal_hash_digestsize, ic->tag_size)))) {
  2693: ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
  2696: bio, dio->bio_details.bi_iter.bi_sector, 0);
  2703: bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
  2708: bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
  2714: static inline bool dm_integrity_check(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
  2716: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  2719: while (dio->bio_details.bi_iter.bi_size) {
  2721: struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
  2723: integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, digest);
  2724: if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
  2727: dm_integrity_free_payload(dio);
  2728: INIT_WORK(&dio->work, dm_integrity_inline_recheck);
  2729: queue_work(ic->offload_wq, &dio->work);
  2734: bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
  2742: struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
  2743: struct dm_integrity_c *ic = dio->ic;
  2744: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  2746: if (likely(dm_integrity_check(ic, dio)))
  2753: struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
  2755: if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK) && likely(dio->bio_details.bi_iter.bi_size != 0)) {
  2757: unlikely(dio->integrity_range_locked))
  2760: if (unlikely(!dm_integrity_check(ic, dio)))
  2763: INIT_WORK(&dio->work, dm_integrity_inline_async_check);
  2764: queue_work(ic->offload_wq, &dio->work);
  2769: dm_integrity_free_payload(dio);
  2770: if (unlikely(dio->integrity_range_locked))
  2771: remove_range(ic, &dio->range);
  2773: if (unlikely(dio->ahash_req))
  2774: mempool_free(dio->ahash_req, &ic->ahash_req_pool);
  2780: struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
  2781: struct dm_integrity_c *ic = dio->ic;
  2784: struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
  2785: int r = dm_integrity_map_inline(dio, false);
  2799: dm_integrity_map_continue(dio, false);
  3430: struct dm_integrity_io *dio;
  3432: dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
  3434: if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
  3435: dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
  3436: remove_range(ic, &dio->range);
  3437: INIT_WORK(&dio->work, integrity_bio_wait);
  3438: queue_work(ic->offload_wq, &dio->work);
  3440: block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
  3441: dio->range.n_sectors, BITMAP_OP_SET);
  3454: struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
  3456: block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
  3457: dio->range.n_sectors, BITMAP_OP_SET);
  3459: remove_range(ic, &dio->range);
  3460: INIT_WORK(&dio->work, integrity_bio_wait);
  3461: queue_work(ic->offload_wq, &dio->work);

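dm-integrity never allocates its dio separately: dm_per_bio_data() returns the io struct embedded in the bio's per-target payload, dm_bio_from_per_bio_data() inverts that mapping, and dec_in_flight() fans completion in over an in_flight atomic. A trimmed sketch of that shape; the struct is cut down to the fields the hits exercise, and the range member's type is written inline here although it is a named struct in the real code:

```c
/* Cut-down per-bio-data shape; the real struct has many more fields. */
struct dm_integrity_io {
	struct work_struct work;	/* reused for each offloaded stage */
	struct dm_integrity_c *ic;
	enum req_op op;
	atomic_t in_flight;
	blk_status_t bi_status;
	struct {
		sector_t logical_sector;
		unsigned int n_sectors;
	} range;
};

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	/* Not an allocation: the io struct lives inside the bio payload. */
	struct dm_integrity_io *dio =
		dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dio->ic = ti->private;
	dio->bi_status = 0;
	dio->op = bio_op(bio);
	/* ... range setup and journal/bitmap dispatch elided ... */
	return DM_MAPIO_SUBMITTED;
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		/* Map back from the embedded io struct to its owning bio. */
		struct bio *bio = dm_bio_from_per_bio_data(dio,
				sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		/* ... end the bio, or requeue the unprocessed tail ... */
	}
}
```
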
drivers/scsi/bfa/bfa_fcpim.c
  396: idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
  2076: bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
  2119: bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
  2345: bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
  2360: bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
  2374: bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
  2403: struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
  2879: bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
  2897: ioim->dio = dio;
  3267: cmnd = (struct scsi_cmnd *) ioim->dio;
  3280: cmnd = (struct scsi_cmnd *) ioim->dio;

drivers/scsi/bfa/bfa_fcpim.h
  193: struct bfad_ioim_s *dio; /* driver IO handle */
  418: struct bfad_ioim_s *dio,
  439: void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
  447: void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
  452: void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);

drivers/scsi/bfa/bfad_im.c
  34: bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
  38: struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
  104: bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
  106: struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
  132: bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
  134: struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
  209: if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {

fs/btrfs/direct-io.c
  803: struct iomap_dio *dio;
  906: dio = btrfs_dio_write(iocb, from, written);
  909: if (IS_ERR_OR_NULL(dio)) {
  910: ret = PTR_ERR_OR_ZERO(dio);
  922: ret = iomap_dio_complete(dio);

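btrfs consumes the iomap dio engine through a three-outcome calling convention, which f2fs repeats below with __iomap_dio_rw() directly. Reassembled from the hits, with the surrounding locking omitted:

```c
/* The three outcomes of starting an iomap direct I/O. */
struct iomap_dio *dio;
ssize_t ret;

dio = btrfs_dio_write(iocb, from, written);	/* wraps __iomap_dio_rw() */
if (IS_ERR_OR_NULL(dio)) {
	/* NULL: nothing was submitted (ret 0); ERR_PTR: the errno. */
	ret = PTR_ERR_OR_ZERO(dio);
} else {
	/* Blocks for sync I/O; returns bytes transferred or an error. */
	ret = iomap_dio_complete(dio);
}
```
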
fs/direct-io.c
  166: static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  168: struct page **pages = dio->pages;
  169: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  181: if (dio->page_errors == 0)
  182: dio->page_errors = ret;
  183: dio->pages[0] = ZERO_PAGE(0);
  207: static inline struct page *dio_get_page(struct dio *dio,
  213: ret = dio_refill_pages(dio, sdio);
  218: return dio->pages[sdio->head];
  221: static void dio_pin_page(struct dio *dio, struct page *page)
  223: if (dio->is_pinned)
  227: static void dio_unpin_page(struct dio *dio, struct page *page)
  229: if (dio->is_pinned)
  244: static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
  246: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  247: loff_t offset = dio->iocb->ki_pos;
  260: if (dio->result) {
  261: transferred = dio->result;
  265: ((offset + transferred) > dio->i_size))
  266: transferred = dio->i_size - offset;
  273: ret = dio->page_errors;
  275: ret = dio->io_error;
  279: if (dio->end_io) {
  281: err = dio->end_io(dio->iocb, offset, ret, dio->private);
  300: kiocb_invalidate_post_direct_write(dio->iocb, ret);
  302: inode_dio_end(dio->inode);
  310: dio->iocb->ki_pos += transferred;
  313: ret = generic_write_sync(dio->iocb, ret);
  314: dio->iocb->ki_complete(dio->iocb, ret);
  317: kmem_cache_free(dio_cache, dio);
  323: struct dio *dio = container_of(work, struct dio, complete_work);
  325: dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
  328: static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
  335: struct dio *dio = bio->bi_private;
  336: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  342: dio_bio_complete(dio, bio);
  344: spin_lock_irqsave(&dio->bio_lock, flags);
  345: remaining = --dio->refcount;
  346: if (remaining == 1 && dio->waiter)
  347: wake_up_process(dio->waiter);
  348: spin_unlock_irqrestore(&dio->bio_lock, flags);
  359: if (dio->result)
  360: defer_completion = dio->defer_completion ||
  362: dio->inode->i_mapping->nrpages);
  364: INIT_WORK(&dio->complete_work, dio_aio_complete_work);
  365: queue_work(dio->inode->i_sb->s_dio_done_wq,
  366: &dio->complete_work);
  368: dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
  382: struct dio *dio = bio->bi_private;
  385: spin_lock_irqsave(&dio->bio_lock, flags);
  386: bio->bi_private = dio->bio_list;
  387: dio->bio_list = bio;
  388: if (--dio->refcount == 1 && dio->waiter)
  389: wake_up_process(dio->waiter);
  390: spin_unlock_irqrestore(&dio->bio_lock, flags);
  394: dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
  404: bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
  406: if (dio->is_async)
  410: if (dio->is_pinned)
  412: bio->bi_write_hint = file_inode(dio->iocb->ki_filp)->i_write_hint;
  425: static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
  427: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  431: bio->bi_private = dio;
  433: spin_lock_irqsave(&dio->bio_lock, flags);
  434: dio->refcount++;
  435: spin_unlock_irqrestore(&dio->bio_lock, flags);
  437: if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
  440: dio->bio_disk = bio->bi_bdev->bd_disk;
  452: static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
  454: if (dio->is_pinned)
  455: unpin_user_pages(dio->pages + sdio->head,
  466: static struct bio *dio_await_one(struct dio *dio)
  471: spin_lock_irqsave(&dio->bio_lock, flags);
  479: while (dio->refcount > 1 && dio->bio_list == NULL) {
  481: dio->waiter = current;
  482: spin_unlock_irqrestore(&dio->bio_lock, flags);
  485: spin_lock_irqsave(&dio->bio_lock, flags);
  486: dio->waiter = NULL;
  488: if (dio->bio_list) {
  489: bio = dio->bio_list;
  490: dio->bio_list = bio->bi_private;
  492: spin_unlock_irqrestore(&dio->bio_lock, flags);
  499: static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
  502: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  503: bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;
  507: dio->io_error = -EAGAIN;
  509: dio->io_error = -EIO;
  512: if (dio->is_async && should_dirty) {
  528: static void dio_await_completion(struct dio *dio)
  532: bio = dio_await_one(dio);
  534: dio_bio_complete(dio, bio);
  545: static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
  550: while (dio->bio_list) {
  555: spin_lock_irqsave(&dio->bio_lock, flags);
  556: bio = dio->bio_list;
  557: dio->bio_list = bio->bi_private;
  558: spin_unlock_irqrestore(&dio->bio_lock, flags);
  559: ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
  568: static int dio_set_defer_completion(struct dio *dio)
  570: struct super_block *sb = dio->inode->i_sb;
  572: if (dio->defer_completion)
  574: dio->defer_completion = true;
  603: static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
  606: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  619: ret = dio->page_errors;
  642: if (dio->flags & DIO_SKIP_HOLES) {
  643: i_size = i_size_read(dio->inode);
  648: ret = (*sdio->get_block)(dio->inode, fs_startblk,
  652: dio->private = map_bh->b_private;
  655: ret = dio_set_defer_completion(dio);
  663: static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
  669: ret = dio_bio_reap(dio, sdio);
  675: dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
  688: static inline int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio)
  700: dio_pin_page(dio, sdio->cur_page);
  720: static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
  746: dio_bio_submit(dio, sdio);
  750: ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
  755: if (dio_bio_add_page(dio, sdio) != 0) {
  756: dio_bio_submit(dio, sdio);
  757: ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
  759: ret = dio_bio_add_page(dio, sdio);
  785: submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
  789: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  815: ret = dio_send_cur_page(dio, sdio, map_bh);
  816: dio_unpin_page(dio, sdio->cur_page);
  822: dio_pin_page(dio, page); /* It is in dio */
  834: ret = dio_send_cur_page(dio, sdio, map_bh);
  836: dio_bio_submit(dio, sdio);
  837: dio_unpin_page(dio, sdio->cur_page);
  852: static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
  880: if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
  903: static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
  906: const enum req_op dio_op = dio->opf & REQ_OP_MASK;
  915: page = dio_get_page(dio, sdio);
  936: ret = get_more_blocks(dio, sdio, map_bh);
  938: dio_unpin_page(dio, page);
  983: dio_unpin_page(dio, page);
  991: i_size_aligned = ALIGN(i_size_read(dio->inode),
  996: dio_unpin_page(dio, page);
  1002: dio->result += 1 << blkbits;
  1012: dio_zero_block(dio, sdio, 0, map_bh);
  1030: ret = submit_page_section(dio, sdio, page,
  1036: dio_unpin_page(dio, page);
  1043: dio->result += this_chunk_bytes;
  1052: dio_unpin_page(dio, page);
  1058: static inline int drop_refcount(struct dio *dio)
  1074: spin_lock_irqsave(&dio->bio_lock, flags);
  1075: ret2 = --dio->refcount;
  1076: spin_unlock_irqrestore(&dio->bio_lock, flags);
  1117: struct dio *dio;
  1127: dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
  1128: if (!dio)
  1135: memset(dio, 0, offsetof(struct dio, pages));
  1137: dio->flags = flags;
  1138: if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
  1142: dio->is_pinned = iov_iter_extract_will_pin(iter);
  1145: dio->i_size = i_size_read(inode);
  1146: if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
  1159: if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
  1174: dio->is_async = false;
  1176: dio->is_async = false;
  1178: dio->is_async = true;
  1180: dio->inode = inode;
  1182: dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
  1184: dio->opf |= REQ_NOWAIT;
  1186: dio->opf = REQ_OP_READ;
  1193: if (dio->is_async && iov_iter_rw(iter) == WRITE) {
  1196: retval = dio_set_defer_completion(dio);
  1197: else if (!dio->inode->i_sb->s_dio_done_wq) {
  1203: retval = sb_init_dio_done_wq(dio->inode->i_sb);
  1219: dio->end_io = end_io;
  1223: dio->iocb = iocb;
  1225: spin_lock_init(&dio->bio_lock);
  1226: dio->refcount = 1;
  1228: dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
  1243: retval = do_direct_IO(dio, &sdio, &map_bh);
  1245: dio_cleanup(dio, &sdio);
  1258: dio_zero_block(dio, &sdio, 1, &map_bh);
  1263: ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
  1266: dio_unpin_page(dio, sdio.cur_page);
  1270: dio_bio_submit(dio, &sdio);
  1278: dio_cleanup(dio, &sdio);
  1285: if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
  1286: inode_unlock(dio->inode);
  1296: if (dio->is_async && retval == 0 && dio->result &&
  1297: (iov_iter_rw(iter) == READ || dio->result == count))
  1300: dio_await_completion(dio);
  1302: if (drop_refcount(dio) == 0) {
  1303: retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
  1310: if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
  1313: kmem_cache_free(dio_cache, dio);
  1320: dio_cache = KMEM_CACHE(dio, SLAB_PANIC);

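The legacy fs/direct-io.c path predates the atomic-refcount style seen in block/fops.c: its count is a plain int under bio_lock, because the completion path inspects refcount, bio_list and waiter together under that one lock. The drop side, reassembled from the hits with comments added:

```c
static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * A plain int rather than an atomic_t: every reader also looks
	 * at dio->bio_list and dio->waiter, so one spinlock covers all
	 * three fields.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;	/* 0: caller owns the dio and must complete/free it */
}
```
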
fs/f2fs/file.c
  4799: struct iomap_dio *dio;
  4829: dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
  4831: if (IS_ERR_OR_NULL(dio)) {
  4832: ret = PTR_ERR_OR_ZERO(dio);
  4836: ret = iomap_dio_complete(dio);
  4874: bool dio;
  4883: dio = f2fs_should_use_dio(inode, iocb, to);
  4888: (!f2fs_is_pinned_file(inode) || !dio))
  4891: if (dio) {
  4960: bool dio)
  4971: if (dio && f2fs_lfs_mode(sbi))
  4977: if (dio && i_size_read(inode) &&
  5008: if (dio) {
  5099: struct iomap_dio *dio;
  5140: dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
  5142: if (IS_ERR_OR_NULL(dio)) {
  5143: ret = PTR_ERR_OR_ZERO(dio);
  5149: ret = iomap_dio_complete(dio);
  5206: bool dio;
  5243: dio = f2fs_should_use_dio(inode, iocb, from);
  5246: if (dio && f2fs_is_atomic_file(inode)) {
  5253: preallocated = f2fs_preallocate_blocks(iocb, from, dio);
  5262: ret = dio ?
  5293: if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))

fs/iomap/direct-io.c
103
ssize_t iomap_dio_complete(struct iomap_dio *dio)
fs/iomap/direct-io.c
105
const struct iomap_dio_ops *dops = dio->dops;
fs/iomap/direct-io.c
106
struct kiocb *iocb = dio->iocb;
fs/iomap/direct-io.c
108
ssize_t ret = dio->error;
fs/iomap/direct-io.c
111
ret = dops->end_io(iocb, dio->size, ret, dio->flags);
fs/iomap/direct-io.c
112
if (should_report_dio_fserror(dio))
fs/iomap/direct-io.c
114
iomap_dio_err_type(dio), offset, dio->size,
fs/iomap/direct-io.c
115
dio->error, GFP_NOFS);
fs/iomap/direct-io.c
118
ret = dio->size;
fs/iomap/direct-io.c
120
if (offset + ret > dio->i_size &&
fs/iomap/direct-io.c
121
!(dio->flags & IOMAP_DIO_WRITE))
fs/iomap/direct-io.c
122
ret = dio->i_size - offset;
fs/iomap/direct-io.c
137
if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
fs/iomap/direct-io.c
138
!(dio->flags & IOMAP_DIO_NO_INVALIDATE))
fs/iomap/direct-io.c
139
kiocb_invalidate_post_direct_write(iocb, dio->size);
fs/iomap/direct-io.c
150
if (dio->flags & IOMAP_DIO_NEED_SYNC)
fs/iomap/direct-io.c
153
ret += dio->done_before;
fs/iomap/direct-io.c
155
trace_iomap_dio_complete(iocb, dio->error, ret);
fs/iomap/direct-io.c
156
kfree(dio);
fs/iomap/direct-io.c
163
struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
fs/iomap/direct-io.c
164
struct kiocb *iocb = dio->iocb;
fs/iomap/direct-io.c
166
iocb->ki_complete(iocb, iomap_dio_complete(dio));
fs/iomap/direct-io.c
174
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
fs/iomap/direct-io.c
176
cmpxchg(&dio->error, 0, ret);
fs/iomap/direct-io.c
182
static void iomap_dio_done(struct iomap_dio *dio)
fs/iomap/direct-io.c
184
struct kiocb *iocb = dio->iocb;
fs/iomap/direct-io.c
186
if (dio->wait_for_completion) {
fs/iomap/direct-io.c
191
struct task_struct *waiter = dio->submit.waiter;
fs/iomap/direct-io.c
193
WRITE_ONCE(dio->submit.waiter, NULL);
fs/iomap/direct-io.c
203
if (dio->error)
fs/iomap/direct-io.c
204
dio->flags |= IOMAP_DIO_COMP_WORK;
fs/iomap/direct-io.c
213
if ((dio->flags & IOMAP_DIO_WRITE) &&
fs/iomap/direct-io.c
214
!(dio->flags & IOMAP_DIO_COMP_WORK)) {
fs/iomap/direct-io.c
215
if (dio->iocb->ki_filp->f_mapping->nrpages)
fs/iomap/direct-io.c
216
dio->flags |= IOMAP_DIO_COMP_WORK;
fs/iomap/direct-io.c
218
dio->flags |= IOMAP_DIO_NO_INVALIDATE;
fs/iomap/direct-io.c
221
if (dio->flags & IOMAP_DIO_COMP_WORK) {
fs/iomap/direct-io.c
230
INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
fs/iomap/direct-io.c
231
queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
fs/iomap/direct-io.c
236
iomap_dio_complete_work(&dio->aio.work);
fs/iomap/direct-io.c
241
struct iomap_dio *dio = bio->bi_private;
fs/iomap/direct-io.c
243
if (dio->flags & IOMAP_DIO_BOUNCE) {
fs/iomap/direct-io.c
244
bio_iov_iter_unbounce(bio, !!dio->error,
fs/iomap/direct-io.c
245
dio->flags & IOMAP_DIO_USER_BACKED);
fs/iomap/direct-io.c
247
} else if (dio->flags & IOMAP_DIO_USER_BACKED) {
fs/iomap/direct-io.c
256
if (atomic_dec_and_test(&dio->ref)) {
fs/iomap/direct-io.c
262
dio->flags &= ~IOMAP_DIO_COMP_WORK;
fs/iomap/direct-io.c
263
iomap_dio_done(dio);
fs/iomap/direct-io.c
269
struct iomap_dio *dio = bio->bi_private;
fs/iomap/direct-io.c
272
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
fs/iomap/direct-io.c
279
struct iomap_dio *dio = ioend->io_bio.bi_private;
fs/iomap/direct-io.c
283
iomap_dio_set_error(dio, ioend->io_error);
fs/iomap/direct-io.c
294
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
fs/iomap/direct-io.c
297
struct inode *inode = file_inode(dio->iocb->ki_filp);
fs/iomap/direct-io.c
312
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
fs/iomap/direct-io.c
317
bio->bi_private = dio;
fs/iomap/direct-io.c
326
iomap_dio_submit_bio(iter, dio, bio, pos);
fs/iomap/direct-io.c
332
struct iomap_dio *dio, loff_t pos, unsigned int alignment,
fs/iomap/direct-io.c
339
if (dio->flags & IOMAP_DIO_BOUNCE)
fs/iomap/direct-io.c
340
nr_vecs = bio_iov_bounce_nr_vecs(dio->submit.iter, op);
fs/iomap/direct-io.c
342
nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
fs/iomap/direct-io.c
344
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
fs/iomap/direct-io.c
349
bio->bi_ioprio = dio->iocb->ki_ioprio;
fs/iomap/direct-io.c
350
bio->bi_private = dio;
fs/iomap/direct-io.c
353
if (dio->flags & IOMAP_DIO_BOUNCE)
fs/iomap/direct-io.c
354
ret = bio_iov_iter_bounce(bio, dio->submit.iter);
fs/iomap/direct-io.c
356
ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
fs/iomap/direct-io.c
371
if (dio->flags & IOMAP_DIO_WRITE)
fs/iomap/direct-io.c
373
else if ((dio->flags & IOMAP_DIO_USER_BACKED) &&
fs/iomap/direct-io.c
374
!(dio->flags & IOMAP_DIO_BOUNCE))
fs/iomap/direct-io.c
380
if (iov_iter_count(dio->submit.iter))
fs/iomap/direct-io.c
381
dio->iocb->ki_flags &= ~IOCB_HIPRI;
fs/iomap/direct-io.c
382
iomap_dio_submit_bio(iter, dio, bio, pos);
fs/iomap/direct-io.c
390
static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
fs/iomap/direct-io.c
408
if (dio->flags & IOMAP_DIO_FSBLOCK_ALIGNED)
fs/iomap/direct-io.c
416
if (dio->flags & IOMAP_DIO_WRITE) {
fs/iomap/direct-io.c
429
dio->flags |= IOMAP_DIO_UNWRITTEN;
fs/iomap/direct-io.c
453
dio->flags |= IOMAP_DIO_COW;
fs/iomap/direct-io.c
471
if (dio->flags & IOMAP_DIO_WRITE_THROUGH) {
fs/iomap/direct-io.c
478
dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
fs/iomap/direct-io.c
492
dio->flags |= IOMAP_DIO_COMP_WORK;
fs/iomap/direct-io.c
504
orig_count = iov_iter_count(dio->submit.iter);
fs/iomap/direct-io.c
505
iov_iter_truncate(dio->submit.iter, length);
fs/iomap/direct-io.c
507
if (!iov_iter_count(dio->submit.iter))
fs/iomap/direct-io.c
515
if (dio->flags & IOMAP_DIO_COMP_WORK)
fs/iomap/direct-io.c
516
dio->iocb->ki_flags &= ~IOCB_HIPRI;
fs/iomap/direct-io.c
522
ret = iomap_dio_zero(iter, dio, pos - pad, pad);
fs/iomap/direct-io.c
532
if (unlikely(data_race(dio->error)))
fs/iomap/direct-io.c
535
ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
fs/iomap/direct-io.c
545
dio->size += ret;
fs/iomap/direct-io.c
549
} while (iov_iter_count(dio->submit.iter));
fs/iomap/direct-io.c
558
((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
fs/iomap/direct-io.c
562
ret = iomap_dio_zero(iter, dio, pos,
fs/iomap/direct-io.c
567
iov_iter_reexpand(dio->submit.iter, orig_count - copied);
fs/iomap/direct-io.c
573
static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
fs/iomap/direct-io.c
575
loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
fs/iomap/direct-io.c
577
dio->size += length;
fs/iomap/direct-io.c
583
static int iomap_dio_inline_iter(struct iomap_iter *iomi, struct iomap_dio *dio)
fs/iomap/direct-io.c
586
struct iov_iter *iter = dio->submit.iter;
fs/iomap/direct-io.c
598
if (dio->flags & IOMAP_DIO_WRITE) {
fs/iomap/direct-io.c
612
dio->size += copied;
fs/iomap/direct-io.c
618
static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
fs/iomap/direct-io.c
622
if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
fs/iomap/direct-io.c
624
return iomap_dio_hole_iter(iter, dio);
fs/iomap/direct-io.c
626
if (!(dio->flags & IOMAP_DIO_WRITE))
fs/iomap/direct-io.c
627
return iomap_dio_hole_iter(iter, dio);
fs/iomap/direct-io.c
628
return iomap_dio_bio_iter(iter, dio);
fs/iomap/direct-io.c
630
return iomap_dio_bio_iter(iter, dio);
fs/iomap/direct-io.c
632
return iomap_dio_inline_iter(iter, dio);
fs/iomap/direct-io.c
641
dio->iocb->ki_filp, current->comm);
fs/iomap/direct-io.c
685
struct iomap_dio *dio;
fs/iomap/direct-io.c
693
dio = kmalloc_obj(*dio);
fs/iomap/direct-io.c
694
if (!dio)
fs/iomap/direct-io.c
697
dio->iocb = iocb;
fs/iomap/direct-io.c
698
atomic_set(&dio->ref, 1);
fs/iomap/direct-io.c
699
dio->size = 0;
fs/iomap/direct-io.c
700
dio->i_size = i_size_read(inode);
fs/iomap/direct-io.c
701
dio->dops = dops;
fs/iomap/direct-io.c
702
dio->error = 0;
fs/iomap/direct-io.c
703
dio->flags = dio_flags & (IOMAP_DIO_FSBLOCK_ALIGNED | IOMAP_DIO_BOUNCE);
fs/iomap/direct-io.c
704
dio->done_before = done_before;
fs/iomap/direct-io.c
706
dio->submit.iter = iter;
fs/iomap/direct-io.c
707
dio->submit.waiter = current;
fs/iomap/direct-io.c
713
if (iomi.pos >= dio->i_size)
fs/iomap/direct-io.c
717
dio->flags |= IOMAP_DIO_USER_BACKED;
fs/iomap/direct-io.c
724
dio->flags |= IOMAP_DIO_WRITE;
fs/iomap/direct-io.c
728
if (iomi.pos >= dio->i_size ||
fs/iomap/direct-io.c
729
iomi.pos + iomi.len > dio->i_size)
fs/iomap/direct-io.c
739
dio->flags |= IOMAP_DIO_NEED_SYNC;
fs/iomap/direct-io.c
751
dio->flags |= IOMAP_DIO_WRITE_THROUGH;
fs/iomap/direct-io.c
757
if (iomi.pos + iomi.len > dio->i_size)
fs/iomap/direct-io.c
758
dio->flags |= IOMAP_DIO_COMP_WORK;
fs/iomap/direct-io.c
796
iomi.status = iomap_dio_iter(&iomi, dio);
fs/iomap/direct-io.c
811
if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
fs/iomap/direct-io.c
812
iov_iter_revert(iter, iomi.pos - dio->i_size);
fs/iomap/direct-io.c
814
if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
fs/iomap/direct-io.c
826
iomap_dio_set_error(dio, ret);
fs/iomap/direct-io.c
836
if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
fs/iomap/direct-io.c
837
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
fs/iomap/direct-io.c
838
else if (dio->flags & IOMAP_DIO_NEED_SYNC)
fs/iomap/direct-io.c
839
dio->flags |= IOMAP_DIO_COMP_WORK;
fs/iomap/direct-io.c
856
dio->wait_for_completion = wait_for_completion;
fs/iomap/direct-io.c
857
if (!atomic_dec_and_test(&dio->ref)) {
fs/iomap/direct-io.c
865
if (!READ_ONCE(dio->submit.waiter))
fs/iomap/direct-io.c
873
return dio;
fs/iomap/direct-io.c
876
kfree(dio);
fs/iomap/direct-io.c
888
struct iomap_dio *dio;
fs/iomap/direct-io.c
890
dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
fs/iomap/direct-io.c
892
if (IS_ERR_OR_NULL(dio))
fs/iomap/direct-io.c
893
return PTR_ERR_OR_ZERO(dio);
fs/iomap/direct-io.c
894
return iomap_dio_complete(dio);
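The fs/iomap/direct-io.c hits above cover the full direct-I/O path: __iomap_dio_rw() setup, bio submission via iomap_dio_bio_iter(), and the completion and error plumbing around iomap_dio_complete(). For orientation, here is a minimal sketch of how a filesystem's ->read_iter might drive that machinery. example_iomap_ops and example_dio_read are hypothetical names, and the zeroed dio_flags, private, and done_before arguments are assumptions, not taken from any in-tree caller.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uio.h>

/* Hypothetical ops table; a real filesystem fills this with its
 * extent-mapping callbacks (iomap_begin/iomap_end). */
static const struct iomap_ops example_iomap_ops;

static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	/* This sketch only handles O_DIRECT requests. */
	if (!(iocb->ki_flags & IOCB_DIRECT))
		return -EOPNOTSUPP;

	/*
	 * NULL dops means no submit_io/end_io hooks; dio_flags 0,
	 * no private data, done_before 0 (no prior partial progress).
	 */
	return iomap_dio_rw(iocb, to, &example_iomap_ops, NULL, 0, NULL, 0);
}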
fs/ocfs2/namei.c
70
bool dio);
fs/ocfs2/namei.c
79
bool dio);
fs/ocfs2/namei.c
2154
bool dio)
fs/ocfs2/namei.c
2158
int namelen = dio ?
fs/ocfs2/namei.c
2162
if (dio) {
fs/ocfs2/namei.c
2212
bool dio)
fs/ocfs2/namei.c
2226
blkno, name, lookup, dio);
fs/ocfs2/namei.c
2255
bool dio)
fs/ocfs2/namei.c
2261
int namelen = dio ?
fs/ocfs2/namei.c
2315
if (dio) {
fs/ocfs2/namei.c
2355
bool dio)
fs/ocfs2/namei.c
2362
if (dio) {
fs/ocfs2/namei.h
25
bool dio);
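Unlike the block and iomap hits, `dio` in fs/ocfs2/namei.c is a plain bool selecting the direct-I/O flavor of an orphan-directory entry name. As a rough illustration of the naming split implied by the `int namelen = dio ? ...` lines: the "dio-" prefix matches mainline's OCFS2_DIO_ORPHAN_PREFIX, but the helper below and its buffer sizing are assumptions, not ocfs2 code.

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only; not the ocfs2 implementation. */
static int example_orphan_name(char *buf, size_t len,
			       unsigned long long blkno, bool dio)
{
	/*
	 * Orphan entries are named after the inode's block number;
	 * orphans created for in-flight direct I/O carry an extra
	 * prefix so the two kinds can be told apart on recovery.
	 */
	if (dio)
		return snprintf(buf, len, "dio-%016llx", blkno);
	return snprintf(buf, len, "%016llx", blkno);
}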
include/linux/iomap.h
584
ssize_t iomap_dio_complete(struct iomap_dio *dio);
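include/linux/iomap.h exporting iomap_dio_complete() separately is what enables the two-step pattern visible in the __iomap_dio_rw()/IS_ERR_OR_NULL() hits near the end of the fs/iomap/direct-io.c block: a caller can submit the I/O, drop its locks, and complete later. A hedged sketch of that split, reusing the hypothetical example_iomap_ops from the read sketch above:

static ssize_t example_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct iomap_dio *dio;

	/* Same argument convention as the read sketch. */
	dio = __iomap_dio_rw(iocb, from, &example_iomap_ops, NULL,
			     0, NULL, 0);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);

	/*
	 * Filesystem locks may be dropped here; the struct iomap_dio
	 * remains valid until iomap_dio_complete() consumes and frees it.
	 */
	return iomap_dio_complete(dio);
}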