root/block/blk-wbt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   the scaling step and scale down the queue depth by a factor of 2x. The
 *   monitoring window is then shrunk to 100msec / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-rq-qos.h"
#include "elevator.h"
#include "blk.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

enum wbt_flags {
        WBT_TRACKED             = 1,    /* write, tracked for throttling */
        WBT_READ                = 2,    /* read */
        WBT_SWAP                = 4,    /* write, from swap_writeout() */
        WBT_DISCARD             = 8,    /* discard */

        WBT_NR_BITS             = 4,    /* number of bits */
};

enum {
        WBT_RWQ_BG              = 0,
        WBT_RWQ_SWAP,
        WBT_RWQ_DISCARD,
        WBT_NUM_RWQ,
};

/*
 * If the current state is WBT_STATE_ON/OFF_DEFAULT, it can be changed to any
 * other state; if the current state is WBT_STATE_ON/OFF_MANUAL, it can only
 * be changed to WBT_STATE_OFF/ON_MANUAL.
 */
enum {
        WBT_STATE_ON_DEFAULT    = 1,    /* on by default */
        WBT_STATE_ON_MANUAL     = 2,    /* on manually by sysfs */
        WBT_STATE_OFF_DEFAULT   = 3,    /* off by default */
        WBT_STATE_OFF_MANUAL    = 4,    /* off manually by sysfs */
};

struct rq_wb {
        /*
         * Settings that govern how we throttle
         */
        unsigned int wb_background;             /* background writeback */
        unsigned int wb_normal;                 /* normal writeback */

        short enable_state;                     /* WBT_STATE_* */

        /*
         * Number of consecutive periods where we don't have enough
         * information to make a firm scale up/down decision.
         */
        unsigned int unknown_cnt;

        u64 win_nsec;                           /* default window size */
        u64 cur_win_nsec;                       /* current window size */

        struct blk_stat_callback *cb;

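        /* issue time and address of the last tracked sync (read) request */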
        u64 sync_issue;
        void *sync_cookie;

        unsigned long last_issue;       /* issue time of last read rq */
        unsigned long last_comp;        /* completion time of last read rq */
        unsigned long min_lat_nsec;
        struct rq_qos rqos;
        struct rq_wait rq_wait[WBT_NUM_RWQ];
        struct rq_depth rq_depth;
};

static int wbt_init(struct gendisk *disk, struct rq_wb *rwb);

static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{
        return container_of(rqos, struct rq_wb, rqos);
}

static inline void wbt_clear_state(struct request *rq)
{
        rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
        return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
        return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
        return rq->wbt_flags & WBT_READ;
}

enum {
        /*
         * Default depth; we'll scale up (to 75% of the max queue depth) or
         * down (to a minimum of 1) from here depending on device stats.
         */
        RWB_DEF_DEPTH   = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC         = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats if we don't meet this minimum write sample count
         */
        RWB_MIN_WRITE_SAMPLES   = 3,

        /*
         * If we have this number of consecutive windows without enough
         * information to scale up or down, slowly return to center state
         * (step == 0).
         */
        RWB_UNKNOWN_BUMP        = 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->enable_state != WBT_STATE_OFF_MANUAL;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct backing_dev_info *bdi = rwb->rqos.disk->bdi;

        return time_before(jiffies, bdi->last_bdp_sleep + HZ);
}

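/*
 * Tracked IO is accounted against one of three wait queues, depending on
 * its type: swap writes, discards, or regular background writes.
 */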
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_SWAP)
                return &rwb->rq_wait[WBT_RWQ_SWAP];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * For discards, our limit is always the background limit. For
         * writes, if the device does write-back caching and no task was
         * recently throttled, drop further down before we wake people up.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (blk_queue_write_cache(rwb->rqos.disk->queue) &&
                 !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are still at or above the limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

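                /*
                 * Batch wakeups: only wake_up_all() once the queue has
                 * fully drained, or once at least half a background depth
                 * worth of slots has freed up, rather than on every
                 * completion.
                 */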
                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct rq_wait *rqw;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when a
 * merged request is freed, which is why the tracked state is checked
 * before accounting.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!wbt_is_tracked(rq)) {
                if (wbt_is_read(rq)) {
                        if (rwb->sync_cookie == rq) {
                                rwb->sync_issue = 0;
                                rwb->sync_cookie = NULL;
                        }

                        wb_timestamp(rwb, &rwb->last_comp);
                }
        } else {
                WARN_ON_ONCE(rq == rwb->sync_cookie);
                __wbt_done(rqos, wbt_flags(rq));
        }
        wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and a minimum of
         * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

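/*
 * How long the currently tracked sync request has been in flight, in
 * nsecs; 0 if nothing is being tracked.
 */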
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        return blk_time_get_ns() - issue;
}

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
        unsigned int i, ret = 0;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                ret += atomic_read(&rwb->rq_wait[i].inflight);

        return ret;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any read samples,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a violation of the latency.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * The stats aren't valid, so we didn't see a usable read/write
         * mix in this window.
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;

        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

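/*
 * Derive the write limits from the current max depth: normal writeback gets
 * roughly half of the max queue depth, background writeback roughly a
 * quarter, with special casing for very shallow depths. A min_lat_nsec of 0
 * means throttling is disabled, so both limits drop to zero.
 */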
static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}

static void scale_up(struct rq_wb *rwb)
{
        if (!rq_depth_scale_up(&rwb->rq_depth))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
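                /*
                 * This computes win_nsec / sqrt(scale_step + 1) in fixed
                 * point: the numerator is scaled by 2^4 and the radicand
                 * by 2^8 (so its square root by 2^4), and the factors of
                 * 16 cancel without losing integer precision.
                 */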
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        if (!rwb->rqos.disk)
                return;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We don't have a valid read/write sample, but we do have
                 * writes going on. Allow step to go negative, to increase
                 * write performance.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when a previous window scaled the depth up
                 * or down, and we currently don't have a valid read/write
                 * sample. In that case, slowly return to the center state
                 * (step == 0).
                 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm the timer if we are scaled away from center or still
         * have IO in flight.
         */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}

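/*
 * Reset scaling to the center state (step == 0) and recompute the depth
 * limits, waking anyone blocked on the old limits.
 */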
static void wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}

bool wbt_disabled(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        return !rqos || !rwb_enabled(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return 0;
        return RQWB(rqos)->min_lat_nsec;
}

static void wbt_set_min_lat(struct request_queue *q, u64 val)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return;

        RQWB(rqos)->min_lat_nsec = val;
        if (val)
                RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
        else
                RQWB(rqos)->enable_state = WBT_STATE_OFF_MANUAL;

        wbt_update_limits(RQWB(rqos));
}


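/*
 * Return true if read IO was issued or completed within the last 100ms,
 * i.e. there is competing IO close by.
 */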
static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

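/* Writes with any of these flags are given the full max depth */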
#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO | REQ_SWAP)

static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
        unsigned int limit;

        if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * At this point we know it's a buffered write. If this is swap
         * trying to free memory, or REQ_SYNC is set, then it's WB_SYNC_ALL
         * writeback, and we'll use the max limit for that. If the write is
         * marked as a background write, or there was competing IO recently,
         * use the background limit; otherwise fall back to the normal
         * limit.
         */
        if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb))
                limit = rwb->rq_depth.max_depth;
        else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If less than 100ms since we issued or completed unrelated
                 * IO, limit us to the background depth (half the normal
                 * depth) for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}

struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        blk_opf_t opf;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       blk_opf_t opf)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .opf = opf,
        };

        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

static inline bool wbt_should_throttle(struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_WRITE:
                /*
                 * Don't throttle WRITE_ODIRECT
                 */
                if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                    (REQ_SYNC | REQ_IDLE))
                        return false;
                fallthrough;
        case REQ_OP_DISCARD:
                return true;
        default:
                return false;
        }
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
        enum wbt_flags flags = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ) {
                flags = WBT_READ;
        } else if (wbt_should_throttle(bio)) {
                if (bio->bi_opf & REQ_SWAP)
                        flags |= WBT_SWAP;
                if (bio_op(bio) == REQ_OP_DISCARD)
                        flags |= WBT_DISCARD;
                flags |= WBT_TRACKED;
        }
        return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
        __wbt_done(rqos, flags);
}

/* May sleep, if we have exceeded the writeback limits. */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;

        flags = bio_to_wbt_flags(rwb, bio);
        if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
        }

        __wbt_wait(rwb, flags, bio->bi_opf);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        /*
         * Track the issue time of a sync request, so we can react more
         * quickly if it takes a long time to complete. Note that this is
         * just a hint. The request can go away when it completes, so it's
         * important we never dereference it. We only use the address to
         * compare with, which is why we store the sync_issue time locally.
         */
        if (wbt_is_read(rq) && !rwb->sync_issue) {
                rwb->sync_cookie = rq;
                rwb->sync_issue = rq->io_start_time_ns;
        }
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);
        if (!rwb_enabled(rwb))
                return;
        if (rq == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}

static int wbt_data_dir(const struct request *rq)
{
        const enum req_op op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op_is_write(op))
                return WRITE;

        /* don't account */
        return -1;
}

static struct rq_wb *wbt_alloc(void)
{
        struct rq_wb *rwb = kzalloc_obj(*rwb);

        if (!rwb)
                return NULL;

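        /* Two stat buckets (READ and WRITE), indexed via wbt_data_dir() */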
        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return NULL;
        }

        return rwb;
}

static void wbt_free(struct rq_wb *rwb)
{
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
}

/*
 * Enable wbt if the defaults are configured that way. Returns true if the
 * caller should allocate and attach a new wbt instance.
 */
static bool __wbt_enable_default(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct rq_qos *rqos;
        bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);

        mutex_lock(&disk->rqos_state_mutex);

        if (blk_queue_disable_wbt(q))
                enable = false;

        /* Throttling already enabled? */
        rqos = wbt_rq_qos(q);
        if (rqos) {
                if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                        RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                mutex_unlock(&disk->rqos_state_mutex);
                return false;
        }
        mutex_unlock(&disk->rqos_state_mutex);

        /* Queue not registered? Maybe shutting down... */
        if (!blk_queue_registered(q))
                return false;

        if (queue_is_mq(q) && enable)
                return true;
        return false;
}

void wbt_enable_default(struct gendisk *disk)
{
        __wbt_enable_default(disk);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

void wbt_init_enable_default(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct rq_wb *rwb;
        unsigned int memflags;

        if (!__wbt_enable_default(disk))
                return;

        rwb = wbt_alloc();
        if (WARN_ON_ONCE(!rwb))
                return;

        if (WARN_ON_ONCE(wbt_init(disk, rwb))) {
                wbt_free(rwb);
                return;
        }

        memflags = blk_debugfs_lock(q);
        blk_mq_debugfs_register_rq_qos(q);
        blk_debugfs_unlock(q, memflags);
}

static u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_rot(q))
                return 75000000ULL;
        return 2000000ULL;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
        RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
        wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
        struct rq_wb *rwb = RQWB(rqos);

        blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
        wbt_free(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct gendisk *disk)
{
        struct rq_qos *rqos = wbt_rq_qos(disk->queue);
        struct rq_wb *rwb;
        if (!rqos)
                return;
        mutex_lock(&disk->rqos_state_mutex);
        rwb = RQWB(rqos);
        if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
                blk_stat_deactivate(rwb->cb);
                rwb->enable_state = WBT_STATE_OFF_DEFAULT;
        }
        mutex_unlock(&disk->rqos_state_mutex);
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->cur_win_nsec);
        return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%d\n", rwb->enable_state);
        return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;

        seq_printf(m, "%u\n", rqos->id);
        return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                seq_printf(m, "%d: inflight %d\n", i,
                           atomic_read(&rwb->rq_wait[i].inflight));
        return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%lu\n", rwb->min_lat_nsec);
        return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->unknown_cnt);
        return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_normal);
        return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_background);
        return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
        {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
        {"enabled", 0400, wbt_enabled_show},
        {"id", 0400, wbt_id_show},
        {"inflight", 0400, wbt_inflight_show},
        {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
        {"unknown_cnt", 0400, wbt_unknown_cnt_show},
        {"wb_normal", 0400, wbt_normal_show},
        {"wb_background", 0400, wbt_background_show},
        {},
};
#endif

static const struct rq_qos_ops wbt_rqos_ops = {
        .throttle = wbt_wait,
        .issue = wbt_issue,
        .track = wbt_track,
        .requeue = wbt_requeue,
        .done = wbt_done,
        .cleanup = wbt_cleanup,
        .queue_depth_changed = wbt_queue_depth_changed,
        .exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
        .debugfs_attrs = wbt_debugfs_attrs,
#endif
};

static int wbt_init(struct gendisk *disk, struct rq_wb *rwb)
{
        struct request_queue *q = disk->queue;
        int ret;
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
        rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        rwb->rq_depth.queue_depth = blk_queue_depth(q);
        wbt_update_limits(rwb);

        /*
         * Assign rwb and add the stats callback.
         */
        mutex_lock(&q->rq_qos_mutex);
        ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
        mutex_unlock(&q->rq_qos_mutex);
        if (ret)
                return ret;

        blk_stat_add_callback(q, rwb->cb);
        return 0;
}

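/*
 * Set the minimum latency target in usec; -1 restores the per-device
 * default. Allocates and attaches the wbt instance on first use.
 */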
int wbt_set_lat(struct gendisk *disk, s64 val)
{
        struct request_queue *q = disk->queue;
        struct rq_qos *rqos = wbt_rq_qos(q);
        struct rq_wb *rwb = NULL;
        unsigned int memflags;
        int ret = 0;

        if (!rqos) {
                rwb = wbt_alloc();
                if (!rwb)
                        return -ENOMEM;
        }

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO in flight if that happens.
         */
        memflags = blk_mq_freeze_queue(q);
        if (!rqos) {
                ret = wbt_init(disk, rwb);
                if (ret) {
                        wbt_free(rwb);
                        goto out;
                }
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                goto out;

        blk_mq_quiesce_queue(q);

        mutex_lock(&disk->rqos_state_mutex);
        wbt_set_min_lat(q, val);
        mutex_unlock(&disk->rqos_state_mutex);

        blk_mq_unquiesce_queue(q);
out:
        blk_mq_unfreeze_queue(q, memflags);

        memflags = blk_debugfs_lock(q);
        blk_mq_debugfs_register_rq_qos(q);
        blk_debugfs_unlock(q, memflags);

        return ret;
}