root/include/linux/blk_types.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK             (PAGE_SECTORS - 1)
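
/*
 * Illustrative sketch, not part of the kernel API: byte counts and sector
 * counts convert by shifting with SECTOR_SHIFT, and one page always spans
 * PAGE_SECTORS sectors.  The helper name below is hypothetical.
 */
static inline sector_t example_bytes_to_sectors(u64 nbytes)
{
	/* 1 sector == SECTOR_SIZE (512) bytes, so divide by shifting. */
	return (sector_t)(nbytes >> SECTOR_SHIFT);
}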

struct block_device {
        sector_t                bd_start_sect;
        sector_t                bd_nr_sectors;
        struct gendisk *        bd_disk;
        struct request_queue *  bd_queue;
        struct disk_stats __percpu *bd_stats;
        unsigned long           bd_stamp;
        atomic_t                __bd_flags;     // partition number + flags
#define BD_PARTNO               255     // lower 8 bits; assign-once
#define BD_READ_ONLY            (1u<<8) // read-only policy
#define BD_WRITE_HOLDER         (1u<<9)
#define BD_HAS_SUBMIT_BIO       (1u<<10)
#define BD_RO_WARNED            (1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL         (1u<<12)
#endif
        dev_t                   bd_dev;
        struct address_space    *bd_mapping;    /* page cache */

        atomic_t                bd_openers;
        spinlock_t              bd_size_lock; /* for bd_inode->i_size updates */
        void *                  bd_claiming;
        void *                  bd_holder;
        const struct blk_holder_ops *bd_holder_ops;
        struct mutex            bd_holder_lock;
        int                     bd_holders;
        struct kobject          *bd_holder_dir;

        atomic_t                bd_fsfreeze_count; /* number of freeze requests */
        struct mutex            bd_fsfreeze_mutex; /* serialize freeze/thaw */

        struct partition_meta_info *bd_meta_info;
        int                     bd_writers;
#ifdef CONFIG_SECURITY
        void                    *bd_security;
#endif
        /*
         * keep this out-of-line as it's both big and not needed in the fast
         * path
         */
        struct device           bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
        ((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
        container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
        (&((_bdev)->bd_device.kobj))
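
/*
 * Illustrative sketch, not part of the kernel API: __bd_flags packs the
 * partition number into its low 8 bits (BD_PARTNO) and boolean state into
 * the bits above, so both can be fetched with a single atomic_read().  The
 * helper names are hypothetical and assume <linux/atomic.h> is reachable.
 */
static inline u8 example_bdev_partno(struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool example_bdev_read_only(struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_READ_ONLY;
}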

/*
 * Block error status values.  See block/blk-core.c:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
#define BLK_STS_NOSPC           ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
#define BLK_STS_TARGET          ((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT   ((__force blk_status_t)6)
#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
#define BLK_STS_IOERR           ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN           ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)
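
/*
 * Illustrative sketch, not part of the kernel API: a driver deciding which
 * of the two "resource" statuses to return might follow the rule above.
 * The helper and its parameter are hypothetical.
 */
static inline blk_status_t example_resource_status(bool per_device_resource)
{
	/*
	 * Per-device resources (e.g. hardware tags) are released when
	 * in-flight IO completes, so the queue is guaranteed to be rerun.
	 */
	if (per_device_resource)
		return BLK_STS_DEV_RESOURCE;
	/* System-wide resources (memory, DMA mappings) carry no such guarantee. */
	return BLK_STS_RESOURCE;
}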

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE      ((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE    ((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE         ((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT  ((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL   ((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_RESV_CONFLICT:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }

        /* Anything else could be a path failure, so should be retried */
        return true;
}
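
/*
 * Illustrative sketch, not part of the kernel API: a multipath-style error
 * handler could use blk_path_error() to decide whether retrying the request
 * on another path may succeed.  The helper name is hypothetical.
 */
static inline bool example_should_failover(blk_status_t error)
{
	/* Only errors that may be path related are worth a failover retry. */
	return error != BLK_STS_OK && blk_path_error(error);
}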

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        blk_opf_t               bi_opf;         /* bottom bits REQ_OP, top bits
                                                 * req_flags.
                                                 */
        unsigned short          bi_flags;       /* BIO_* below */
        unsigned short          bi_ioprio;
        enum rw_hint            bi_write_hint;
        u8                      bi_write_stream;
        blk_status_t            bi_status;

        /*
         * The bvec gap bit indicates the lowest set bit in any address offset
         * between all bi_io_vecs. This field is initialized only after the bio
         * is split to the hardware limits (see bio_split_io_at()). The value
         * may be used to consider DMA optimization when performing that
         * mapping. The value is compared to a power of two mask where the
         * result depends on any bit set within the mask, so saving the lowest
         * bit is sufficient to know if any segment gap collides with the mask.
         */
        u8                      bi_bvec_gap_bit;

        atomic_t                __bi_remaining;

        /* The actual vec list, preserved by bio_reset() */
        struct bio_vec          *bi_io_vec;
        struct bvec_iter        bi_iter;

        union {
                /* for polled bios: */
                blk_qc_t                bi_cookie;
                /* for plugged zoned writes only: */
                unsigned int            __bi_nr_segments;
        };
        bio_end_io_t            *bi_end_io;
        void                    *bi_private;
#ifdef CONFIG_BLK_CGROUP
        /*
         * Represents the association of the css and request_queue for the bio.
         * If a bio goes direct to device, it will not have a blkg as it will
         * not have a request_queue associated with it.  The reference is put
         * on release of the bio.
         */
        struct blkcg_gq         *bi_blkg;
        /* Time that this bio was issued. */
        u64                     issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
        u64                     bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct bio_crypt_ctx    *bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
        struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

        unsigned short          bi_vcnt;        /* how many bio_vec's */

        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */

        /*
         * Number of elements in `bi_io_vec` that were allocated for this bio.
         * Only used by the bio submitter to make `bio_add_page` fail once full
         * and to free the `bi_io_vec` allocation. Must not be used in drivers
         * and does not hold a useful value for cloned bios.
         */
        unsigned short          bi_max_vecs;

        atomic_t                __bi_cnt;       /* pin count */

        struct bio_set          *bi_pool;
};

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SIZE            UINT_MAX /* max value of bi_iter.bi_size */
#define BIO_MAX_SECTORS         (BIO_MAX_SIZE >> SECTOR_SHIFT)

static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{
        return (struct bio_vec *)(bio + 1);
}
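
/*
 * Illustrative sketch, not part of the kernel API: a bio carrying inline
 * vecs is allocated as a single block with the bio_vec array placed directly
 * behind struct bio, which is the address bio_inline_vecs() computes:
 *
 *	p   = alloc(sizeof(struct bio) + nr_vecs * sizeof(struct bio_vec));
 *	bio = p;
 *	bvs = bio_inline_vecs(bio);	// == (struct bio_vec *)(bio + 1)
 *
 * The alloc() call above is only a placeholder for whatever allocator the
 * submitter uses.
 */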

/*
 * bio flags
 */
enum {
        BIO_PAGE_PINNED,        /* Unpin pages in bio_release_pages() */
        BIO_CLONED,             /* doesn't own data */
        BIO_QUIET,              /* Make BIO Quiet */
        BIO_CHAIN,              /* chained bio, ->bi_remaining in effect */
        BIO_REFFED,             /* bio has elevated ->bi_cnt */
        BIO_BPS_THROTTLED,      /* This bio has already been subjected to
                                 * throttling rules. Don't do it again. */
        BIO_TRACE_COMPLETION,   /* bio_endio() should trace the final completion
                                 * of this bio. */
        BIO_CGROUP_ACCT,        /* has been accounted to a cgroup */
        BIO_QOS_THROTTLED,      /* bio went through rq_qos throttle path */
        /*
         * This bio has completed bps throttling at the single tg granularity,
         * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
         * into the sq->queued of the upper tg, or is about to be dispatched,
         * this flag needs to be cleared. Since blk-throttle and rq_qos are not
         * on the same hierarchical level, reuse the value.
         */
        BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
	BIO_QOS_MERGED,		/* went through rq_qos merge path */
        BIO_REMAPPED,
        BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
        BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
        BIO_FLAG_LAST
};
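
/*
 * Illustrative sketch, not part of the kernel API: the enum values above are
 * bit numbers within bio->bi_flags, so a flag is tested by shifting.  The
 * real accessors (bio_flagged() and friends) live in <linux/bio.h>; the name
 * below is hypothetical.
 */
static inline bool example_bio_flagged(const struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}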

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS     8
#define REQ_OP_MASK     (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
        /** @REQ_OP_READ: read sectors from the device */
        REQ_OP_READ             = (__force blk_opf_t)0,
        /** @REQ_OP_WRITE: write sectors to the device */
        REQ_OP_WRITE            = (__force blk_opf_t)1,
        /** @REQ_OP_FLUSH: flush the volatile write cache */
        REQ_OP_FLUSH            = (__force blk_opf_t)2,
        /** @REQ_OP_DISCARD: discard sectors */
        REQ_OP_DISCARD          = (__force blk_opf_t)3,
        /** @REQ_OP_SECURE_ERASE: securely erase sectors */
        REQ_OP_SECURE_ERASE     = (__force blk_opf_t)5,
        /** @REQ_OP_ZONE_APPEND: write data at the current zone write pointer */
        REQ_OP_ZONE_APPEND      = (__force blk_opf_t)7,
	/** @REQ_OP_WRITE_ZEROES: write zero-filled sectors to the device */
        REQ_OP_WRITE_ZEROES     = (__force blk_opf_t)9,
        /** @REQ_OP_ZONE_OPEN: Open a zone */
        REQ_OP_ZONE_OPEN        = (__force blk_opf_t)11,
        /** @REQ_OP_ZONE_CLOSE: Close a zone */
        REQ_OP_ZONE_CLOSE       = (__force blk_opf_t)13,
        /** @REQ_OP_ZONE_FINISH: Transition a zone to full */
        REQ_OP_ZONE_FINISH      = (__force blk_opf_t)15,
        /** @REQ_OP_ZONE_RESET: reset a zone write pointer */
        REQ_OP_ZONE_RESET       = (__force blk_opf_t)17,
	/** @REQ_OP_ZONE_RESET_ALL: reset all the zones present on the device */
        REQ_OP_ZONE_RESET_ALL   = (__force blk_opf_t)19,

        /* Driver private requests */
        /* private: */
        REQ_OP_DRV_IN           = (__force blk_opf_t)34,
        REQ_OP_DRV_OUT          = (__force blk_opf_t)35,

        REQ_OP_LAST             = (__force blk_opf_t)36,
};
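
/*
 * Illustrative example, not part of the kernel API: the encoding described
 * above means odd opcodes (WRITE, WRITE_ZEROES, ZONE_APPEND, ...) transfer
 * data to the device, which is exactly the property op_is_write() below
 * relies on.  The helper name is hypothetical.
 */
static inline bool example_op_sends_data_to_device(enum req_op op)
{
	/* Bit 0 of the opcode encodes the data transfer direction. */
	return op & (__force blk_opf_t)1;
}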

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
        __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                REQ_OP_BITS,
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_IDLE,             /* anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
        __REQ_PREFLUSH,         /* request for cache flush */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */
        __REQ_POLLED,           /* caller polls for completion using bio_poll */
        __REQ_ALLOC_CACHE,      /* allocate IO from cache if available */
        __REQ_SWAP,             /* swap I/O */
        __REQ_DRV,              /* for driver use */
        __REQ_FS_PRIVATE,       /* for file system (submitter) use */
        __REQ_ATOMIC,           /* for atomic write operations */
        /*
         * Command specific flags, keep last:
         */
        /* for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */

        __REQ_NR_BITS,          /* stops here */
};

#define REQ_FAILFAST_DEV        \
                        (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  \
                        (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     \
                        (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC        (__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META        (__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO        (__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE     (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE        (__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY   (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA         (__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH    (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD      (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND  (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT      (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED      (__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP        (__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV         (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE  (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC      (__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP     (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
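
/*
 * Illustrative example, not part of the kernel API: an operation and its
 * flags are combined into one blk_opf_t value (e.g. bio->bi_opf), and the
 * opcode is recovered by masking with REQ_OP_MASK.  A sync FUA write, for
 * instance, would be built as REQ_OP_WRITE | REQ_SYNC | REQ_FUA.  The helper
 * name is hypothetical.
 */
static inline enum req_op example_opf_to_op(blk_opf_t opf)
{
	/* The low REQ_OP_BITS bits hold the opcode; the rest are flags. */
	return opf & REQ_OP_MASK;
}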

enum stat_group {
        STAT_READ,
        STAT_WRITE,
        STAT_DISCARD,
        STAT_FLUSH,

        NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
        return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
        return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
        return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
                (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
        return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
        switch (op & REQ_OP_MASK) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return true;
        default:
                return false;
        }
}

static inline int op_stat_group(enum req_op op)
{
        if (op_is_discard(op))
                return STAT_DISCARD;
        return op_is_write(op);
}
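
/*
 * Illustrative note, not part of the kernel API: op_stat_group() relies on
 * STAT_READ == 0 and STAT_WRITE == 1 matching the return value of
 * op_is_write(), so for example:
 *
 *	op_stat_group(REQ_OP_READ)    == STAT_READ
 *	op_stat_group(REQ_OP_WRITE)   == STAT_WRITE
 *	op_stat_group(REQ_OP_DISCARD) == STAT_DISCARD
 */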

struct blk_rq_stat {
        u64 mean;
        u64 min;
        u64 max;
        u32 nr_samples;
        u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */