#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
struct address_space;
struct fiemap_extent_info;
struct inode;
struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
struct iomap_read_folio_ctx;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;
/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE 0	/* no blocks allocated, need allocation */
#define IOMAP_DELALLOC 1	/* delayed allocation blocks */
#define IOMAP_MAPPED 2	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN 3	/* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE 4	/* data inline in the inode */
/*
 * Flags reported by the file system from ->iomap_begin:
 *
 * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
 * zeroing for areas that no data is copied to.
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit it to persistent storage.
 *
 * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
 * unshared as part of a write.
 *
 * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple
 * block mappings.
 *
 * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
 * buffer heads for this mapping (only meaningful with CONFIG_BUFFER_HEAD).
 */
#define IOMAP_F_NEW (1U << 0)
#define IOMAP_F_DIRTY (1U << 1)
#define IOMAP_F_SHARED (1U << 2)
#define IOMAP_F_MERGED (1U << 3)
#ifdef CONFIG_BUFFER_HEAD
#define IOMAP_F_BUFFER_HEAD (1U << 4)
#else
#define IOMAP_F_BUFFER_HEAD 0
#endif
/*
 * IOMAP_F_XATTR indicates the iomap is for an extended attribute extent
 * rather than a file data extent.
 *
 * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap
 * must never be merged with the mapping before it.
 *
 * IOMAP_F_ANON_WRITE indicates that (write) I/O does not have a target block
 * assigned to it yet and the file system will do that in the bio submission
 * handler (see iomap_sector(), which returns U64_MAX for such mappings).
 *
 * IOMAP_F_ATOMIC_BIO indicates that (write) I/O should be issued as an
 * atomic bio — NOTE(review): presumably maps to REQ_ATOMIC; confirm against
 * the DIO implementation.
 */
#define IOMAP_F_XATTR (1U << 5)
#define IOMAP_F_BOUNDARY (1U << 6)
#define IOMAP_F_ANON_WRITE (1U << 7)
#define IOMAP_F_ATOMIC_BIO (1U << 8)
/* IOMAP_F_PRIVATE: start of flags reserved for file system specific usage */
#define IOMAP_F_PRIVATE (1U << 12)
/*
 * Flags set by the core iomap code during operations:
 *
 * IOMAP_F_FOLIO_BATCH — NOTE(review): presumably indicates the iterator
 * carries a folio_batch (struct iomap_iter::fbatch) supplying the folios for
 * the operation; confirm against iomap_fill_dirty_folios() callers.
 *
 * IOMAP_F_SIZE_CHANGED indicates to the ->iomap_end method that the file
 * size has changed as a result of this write operation.
 *
 * IOMAP_F_STALE indicates that the iomap is no longer valid and the range it
 * covers needs to be remapped before the operation can proceed (see the
 * ->iomap_valid hook and struct iomap::validity_cookie).
 */
#define IOMAP_F_FOLIO_BATCH (1U << 13)
#define IOMAP_F_SIZE_CHANGED (1U << 14)
#define IOMAP_F_STALE (1U << 15)
/* Magic value for struct iomap::addr when no block address is valid. */
#define IOMAP_NULL_ADDR -1ULL
/*
 * struct iomap - a single mapping of a file byte range onto storage.
 *
 * Describes how [offset, offset + length) of a file is backed: by disk
 * blocks, a hole, delalloc space, or inline data, per @type above.
 */
struct iomap {
	u64 addr;	/* disk offset of the mapping, in bytes (see iomap_sector()); IOMAP_NULL_ADDR if none */
	loff_t offset;	/* file offset of the mapping, in bytes */
	u64 length;	/* length of the mapping, in bytes */
	u16 type;	/* IOMAP_HOLE, IOMAP_MAPPED, ... */
	u16 flags;	/* IOMAP_F_* flags describing the mapping */
	struct block_device *bdev;	/* block device for I/O */
	struct dax_device *dax_dev;	/* dax device, for DAX operations */
	void *inline_data;	/* in-memory data for IOMAP_INLINE (see iomap_inline_data()) */
	void *private;	/* file system private data */
	u64 validity_cookie;	/* checked by ->iomap_valid to detect stale mappings */
};
/*
 * Translate a file position into the 512-byte sector backing it in @iomap.
 *
 * Anonymous-write mappings (IOMAP_F_ANON_WRITE) have no block address
 * assigned yet, so report U64_MAX for them.
 */
static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
	sector_t sector = U64_MAX;

	if (!(iomap->flags & IOMAP_F_ANON_WRITE))
		sector = (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
	return sector;
}
/*
 * Return the in-memory inline data address corresponding to file position
 * @pos within an IOMAP_INLINE mapping.
 */
static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
{
	loff_t poff = pos - iomap->offset;

	return iomap->inline_data + poff;
}
static inline bool iomap_inline_data_valid(const struct iomap *iomap)
{
return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
}
/*
 * File-system hooks into the buffered write path.  All hooks appear
 * optional from here — NOTE(review): confirm which may be NULL against the
 * iomap buffered-io implementation.
 */
struct iomap_write_ops {
	/*
	 * Get a folio to write into at @pos for up to @len bytes; a custom
	 * hook lets the file system do its own locking/lookup instead of
	 * the default iomap_get_folio().
	 */
	struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
			unsigned len);
	/*
	 * Release the folio obtained by ->get_folio after @copied bytes
	 * were written to it.
	 */
	void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
			struct folio *folio);
	/*
	 * Check that the cached iomap is still up to date (compare
	 * iomap->validity_cookie against current fs state); a false return
	 * causes the range to be remapped (see IOMAP_F_STALE).
	 */
	bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
	/*
	 * Read in a sub-range of @folio that is not uptodate before a
	 * partial write to it.
	 */
	int (*read_folio_range)(const struct iomap_iter *iter,
			struct folio *folio, loff_t pos, size_t len);
};
/*
 * Flags for iomap_begin / iomap_end.  No flag implies a read.
 */
#define IOMAP_WRITE (1 << 0)	/* writing, must allocate blocks */
#define IOMAP_ZERO (1 << 1)	/* zeroing operation, may skip holes */
#define IOMAP_REPORT (1 << 2)	/* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT (1 << 3)	/* mapping for page fault */
#define IOMAP_DIRECT (1 << 4)	/* direct I/O */
#define IOMAP_NOWAIT (1 << 5)	/* do not block */
#define IOMAP_OVERWRITE_ONLY (1 << 6)	/* only pure overwrites allowed */
#define IOMAP_UNSHARE (1 << 7)	/* unshare_file_range */
#ifdef CONFIG_FS_DAX
#define IOMAP_DAX (1 << 8)	/* DAX mapping */
#else
#define IOMAP_DAX 0
#endif
#define IOMAP_ATOMIC (1 << 9)	/* write with torn-write protection */
#define IOMAP_DONTCACHE (1 << 10)	/* NOTE(review): presumably drop-behind page cache I/O; confirm */
struct iomap_ops {
	/*
	 * Return the existing mapping at pos, or reserve space starting at
	 * pos for up to length, as long as it can be done as a single
	 * mapping.  The actual length is returned in iomap->length.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap,
			struct iomap *srcmap);
	/*
	 * Commit and/or unreserve space previously allocated using
	 * iomap_begin.  @written indicates the length of the successful
	 * operation which needs to be committed, while the rest needs to be
	 * unreserved.  @written might be zero if no data was written.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap);
};
/**
 * struct iomap_iter - Iterate through a range of a file
 * @inode: Set at the start of the iteration and should not change.
 * @pos: The current file position we are operating on.
 * @len: The remaining length of the file segment we're operating on.
 * @iter_start_pos: The original start pos for the current iomap; used for
 *	incremental iter advance.
 * @status: Status of the most recent iteration.  Zero on success or a
 *	negative errno on error.
 * @flags: Zero or more of the iomap_begin flags above.
 * @iomap: Map describing the I/O iteration.
 * @srcmap: Source map for COW operations.
 * @fbatch: Batch of folios for the operation — NOTE(review): presumably
 *	filled by iomap_fill_dirty_folios() when IOMAP_F_FOLIO_BATCH is set;
 *	confirm.
 * @private: Opaque caller data passed through to the operation.
 */
struct iomap_iter {
	struct inode *inode;
	loff_t pos;
	u64 len;
	loff_t iter_start_pos;
	int status;
	unsigned flags;
	struct iomap iomap;
	struct iomap srcmap;
	struct folio_batch *fbatch;
	void *private;
};
/*
 * Advance the iterator to the next mapping via @ops; returns > 0 while
 * there is more work, 0 when done, or a negative errno.
 */
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
/* Advance iter->pos / shrink iter->len by @count processed bytes. */
int iomap_iter_advance(struct iomap_iter *iter, u64 count);
/*
 * Clamp @len so that [@pos, @pos + len) does not extend past the end of the
 * current mapping — nor past the source mapping, when one is present for a
 * COW operation.
 */
static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
		u64 len)
{
	const struct iomap *srcmap = &iter->srcmap;
	u64 end = iter->iomap.offset + iter->iomap.length;

	if (srcmap->type != IOMAP_HOLE)
		end = min(end, srcmap->offset + srcmap->length);
	return min(len, end - pos);
}
/*
 * Return the length of the remaining iteration span: iter->len trimmed to
 * the end of the current mapping(s).
 */
static inline u64 iomap_length(const struct iomap_iter *iter)
{
	return iomap_length_trim(iter, iter->pos, iter->len);
}
/*
 * Advance the iterator over the entire remaining span of the current
 * mapping in one step.
 */
static inline int iomap_iter_advance_full(struct iomap_iter *iter)
{
	return iomap_iter_advance(iter, iomap_length(iter));
}
/*
 * Return the mapping data should be read from: the source map when one is
 * set (COW operations write to @iomap but read from @srcmap), otherwise the
 * regular mapping.
 */
static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
{
	return i->srcmap.type == IOMAP_HOLE ? &i->iomap : &i->srcmap;
}
/*
 * Return the file offset just past the last fs block touched by a write of
 * @written bytes at @pos.  A short write of zero bytes touched nothing, so
 * fall back to the block containing @pos itself.
 */
static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
		ssize_t written)
{
	loff_t blocksize = i_blocksize(inode);

	if (written)
		return round_up(pos + written, blocksize);
	return round_down(pos, blocksize);
}
static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
{
return (iter->iomap.flags & IOMAP_F_SHARED) &&
iter->srcmap.type == IOMAP_MAPPED;
}
/* Buffered write through the page cache; returns bytes written or -errno. */
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private);
/* ->read_folio / ->readahead helpers driven by the fs iomap_begin. */
void iomap_read_folio(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx, void *private);
void iomap_readahead(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx, void *private);
/* Address-space operation helpers for iomap-based file systems: */
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
/* Break COW sharing for [pos, pos + len) by copying the data in place. */
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops);
/*
 * Collect dirty folios in [*start, end) into iter->fbatch.
 * NOTE(review): presumably adjusts *start and sets IOMAP_F_FOLIO_BATCH in
 * *iomap_flags; confirm against the implementation.
 */
unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
		loff_t end, unsigned int *iomap_flags);
/* Zero [pos, pos + len); *did_zero reports whether anything was zeroed. */
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero, const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private);
/* Zero the partial block at EOF for truncate. */
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private);
/* mmap write-fault handler for iomap-based file systems. */
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
		void *private);
/* Callback to punch out delalloc reservations that were never written. */
typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
		struct iomap *iomap);
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch);
/* FIEMAP / SEEK_HOLE / SEEK_DATA / bmap implementations on top of iomap: */
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops);
/*
 * Flags for a writeback/DIO completion (struct iomap_ioend::io_flags):
 */
#define IOMAP_IOEND_SHARED (1U << 0)	/* written to a shared (COW) extent */
#define IOMAP_IOEND_UNWRITTEN (1U << 1)	/* written to an unwritten extent */
#define IOMAP_IOEND_BOUNDARY (1U << 2)	/* mapping boundary, must not merge */
#define IOMAP_IOEND_DIRECT (1U << 3)	/* from direct I/O */
#define IOMAP_IOEND_DONTCACHE (1U << 4)	/* from an IOMAP_DONTCACHE operation */
/* Ioends carrying any of these flags must never be merged together. */
#define IOMAP_IOEND_NOMERGE_FLAGS \
(IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT | \
IOMAP_IOEND_DONTCACHE)
/*
 * Structure for I/O completions.  Embeds the bio it completes, see
 * iomap_ioend_from_bio().
 */
struct iomap_ioend {
	struct list_head io_list;	/* next ioend in chain */
	u16 io_flags;	/* IOMAP_IOEND_* */
	struct inode *io_inode;	/* file being written to */
	size_t io_size;	/* size of the extent, in bytes */
	atomic_t io_remaining;	/* completions remaining */
	int io_error;	/* stashed away status */
	struct iomap_ioend *io_parent;	/* parent for completions */
	loff_t io_offset;	/* offset in the file, in bytes */
	sector_t io_sector;	/* start sector of ioend */
	void *io_private;	/* file system private data */
	struct bio io_bio;	/* MUST BE LAST! (variable-size bio tail) */
};
/*
 * Recover the ioend that embeds @bio (io_bio is the last member of
 * struct iomap_ioend).
 */
static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
{
	return container_of(bio, struct iomap_ioend, io_bio);
}
/*
 * File-system hooks for the iomap writeback path.
 */
struct iomap_writeback_ops {
	/*
	 * Write back up to @len bytes of @folio starting at file offset
	 * @pos.  @end_pos is the file size to which the folio is written.
	 * Returns the number of bytes handled, or a negative errno.
	 */
	ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc,
			struct folio *folio, u64 pos, unsigned int len,
			u64 end_pos);
	/*
	 * Submit the I/O accumulated in @wpc (see
	 * iomap_ioend_writeback_submit() for the default bio-based
	 * implementation).  @error carries a prior error to propagate.
	 */
	int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
};
/*
 * Context carried through one iomap_writepages() invocation.
 */
struct iomap_writepage_ctx {
	struct iomap iomap;	/* current cached mapping */
	struct inode *inode;	/* inode being written back */
	struct writeback_control *wbc;	/* writeback control from the VM */
	const struct iomap_writeback_ops *ops;	/* fs hooks, see above */
	u32 nr_folios;	/* folios added to the current I/O — TODO confirm */
	void *wb_ctx;	/* writeback-implementation private state */
};
/* Initialize the ioend embedded in @bio for a completion at @file_offset. */
struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
		loff_t file_offset, u16 ioend_flags);
/*
 * Split @ioend so the first part covers at most @max_len bytes; used by
 * file systems that must bound I/O size (e.g. @is_append for zone append —
 * TODO confirm exact semantics of is_append against the implementation).
 */
struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
		unsigned int max_len, bool is_append);
/* Complete @ioend and everything chained to it with status @error. */
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
/* Merge physically contiguous, mergeable ioends from @more_ioends. */
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
		struct list_head *more_ioends);
/* Sort an ioend list so merging candidates become adjacent. */
void iomap_sort_ioends(struct list_head *ioend_list);
/* Add @dirty_len bytes of @folio at @pos to the I/O being built in @wpc. */
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
		loff_t pos, loff_t end_pos, unsigned int dirty_len);
/* Default ->writeback_submit: submit the built ioend bio(s). */
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
/* Completion notification for a sub-folio read of @len bytes at @off. */
void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
		int error);
/* Completion notification for a sub-folio write of @len bytes. */
void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len);
/* Write back one folio / all dirty folios described by @wpc. */
int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
int iomap_writepages(struct iomap_writepage_ctx *wpc);
/*
 * Context for iomap_read_folio() / iomap_readahead().  Exactly one of
 * @cur_folio (single-folio read) or @rac (readahead) is used —
 * NOTE(review): inferred from iomap_bio_read_folio()/iomap_bio_readahead()
 * below; confirm.
 */
struct iomap_read_folio_ctx {
	const struct iomap_read_ops *ops;	/* how to read folio ranges */
	struct folio *cur_folio;	/* folio for a ->read_folio call */
	struct readahead_control *rac;	/* readahead state for ->readahead */
	void *read_ctx;	/* read-implementation private state */
};
/*
 * Hooks implementing the actual data transfer for reads; the default
 * bio-based implementation is iomap_bio_read_ops below.
 */
struct iomap_read_ops {
	/* Start reading @len bytes into the current folio of @ctx. */
	int (*read_folio_range)(const struct iomap_iter *iter,
			struct iomap_read_folio_ctx *ctx, size_t len);
	/* Submit any read I/O batched up in @ctx. */
	void (*submit_read)(struct iomap_read_folio_ctx *ctx);
};
/*
 * Flags passed to the ->end_io direct I/O completion handler:
 */
#define IOMAP_DIO_UNWRITTEN (1 << 0)	/* covers unwritten extent(s) */
#define IOMAP_DIO_COW (1 << 1)	/* covers COW extent(s) */
struct iomap_dio_ops {
	/*
	 * Filesystem completion hook: @size bytes transferred, @error is
	 * the I/O status, @flags is a mask of the IOMAP_DIO_* completion
	 * flags above.
	 */
	int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
			unsigned flags);
	/*
	 * Optional custom bio submission hook (e.g. for file systems that
	 * assign blocks at submission time, see IOMAP_F_ANON_WRITE).
	 */
	void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
			loff_t file_offset);
	/* Optional bio_set to allocate dio bios from. */
	struct bio_set *bio_set;
};
/*
 * Caller flags for iomap_dio_rw / __iomap_dio_rw:
 */
#define IOMAP_DIO_FORCE_WAIT (1 << 0)	/* wait synchronously for completion */
#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)	/* only pure overwrites; else fail — TODO confirm errno */
#define IOMAP_DIO_PARTIAL (1 << 2)	/* allow partial completion (see @done_before) */
#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)	/* NOTE(review): presumably I/O is fs-block aligned; confirm */
#define IOMAP_DIO_BOUNCE (1 << 4)	/* NOTE(review): presumably use bounce buffering; confirm */
/* Perform a direct I/O read or write and wait/complete as flags dictate. */
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before);
/* Split-phase variant: returns the dio for a later iomap_dio_complete(). */
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
/* bio end_io handler for direct I/O bios. */
void iomap_dio_bio_end_io(struct bio *bio);
#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;
/* Activate a swapfile by mapping its extents through the fs iomap ops. */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops);
#else
/* Without CONFIG_SWAP, swapfile activation always fails. */
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
#endif
/* bio_set that writeback ioend bios are allocated from. */
extern struct bio_set iomap_ioend_bioset;
#ifdef CONFIG_BLOCK
/* Default bio-based implementation of struct iomap_read_ops. */
extern const struct iomap_read_ops iomap_bio_read_ops;
/*
 * Convenience wrapper for a ->read_folio implementation using the default
 * bio-based read ops.
 */
static inline void iomap_bio_read_folio(struct folio *folio,
		const struct iomap_ops *ops)
{
	struct iomap_read_folio_ctx ctx = { 0 };

	ctx.ops = &iomap_bio_read_ops;
	ctx.cur_folio = folio;
	iomap_read_folio(ops, &ctx, NULL);
}
/*
 * Convenience wrapper for a ->readahead implementation using the default
 * bio-based read ops.
 */
static inline void iomap_bio_readahead(struct readahead_control *rac,
		const struct iomap_ops *ops)
{
	struct iomap_read_folio_ctx ctx = { 0 };

	ctx.ops = &iomap_bio_read_ops;
	ctx.rac = rac;
	iomap_readahead(ops, &ctx, NULL);
}
#endif
#endif