drivers/mtd/nand/spi/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *      Peter Pan <peterpandong@micron.com>
 *      Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)     "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

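/*
 * The spinand_fill_*_op() helpers below return a copy of the matching
 * template from the instruction set currently in use (single SPI SDR or
 * octal DTR), with the variable fields (address, dummy cycles, data
 * buffer) filled in when the operation has any.
 */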
static struct spi_mem_op
spinand_fill_reset_op(struct spinand_device *spinand)
{
        return spinand->op_templates->reset;
}

static struct spi_mem_op
spinand_fill_readid_op(struct spinand_device *spinand,
                       u8 naddr, u8 ndummy, void *buf, unsigned int len)
{
        struct spi_mem_op op = spinand->op_templates->readid;

        op.addr.nbytes = naddr;
        op.dummy.nbytes = ndummy;
        op.data.buf.in = buf;
        op.data.nbytes = len;

        return op;
}

struct spi_mem_op
spinand_fill_wr_en_op(struct spinand_device *spinand)
{
        return spinand->op_templates->wr_en;
}

static __maybe_unused struct spi_mem_op
spinand_fill_wr_dis_op(struct spinand_device *spinand)
{
        return spinand->op_templates->wr_dis;
}

struct spi_mem_op
spinand_fill_set_feature_op(struct spinand_device *spinand, u64 reg, const void *valptr)
{
        struct spi_mem_op op = spinand->op_templates->set_feature;

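        /*
         * DTR ops transfer one byte per clock edge; duplicating the
         * single-byte register address keeps the address phase an even
         * number of bytes (both bytes carry the same value).
         */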
        if (op.cmd.dtr && op.cmd.buswidth == 8)
                reg |= reg << 8;

        op.addr.val = reg;
        op.data.buf.out = valptr;

        return op;
}

struct spi_mem_op
spinand_fill_get_feature_op(struct spinand_device *spinand, u64 reg, void *valptr)
{
        struct spi_mem_op op = spinand->op_templates->get_feature;

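        /* Same register address duplication as in spinand_fill_set_feature_op(). */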
        if (op.cmd.dtr && op.cmd.buswidth == 8)
                reg |= reg << 8;

        op.addr.val = reg;
        op.data.buf.in = valptr;

        return op;
}

static struct spi_mem_op
spinand_fill_blk_erase_op(struct spinand_device *spinand, u64 addr)
{
        struct spi_mem_op op = spinand->op_templates->blk_erase;

        op.addr.val = addr;

        return op;
}

static struct spi_mem_op
spinand_fill_page_read_op(struct spinand_device *spinand, u64 addr)
{
        struct spi_mem_op op = spinand->op_templates->page_read;

        op.addr.val = addr;

        return op;
}

struct spi_mem_op
spinand_fill_prog_exec_op(struct spinand_device *spinand, u64 addr)
{
        struct spi_mem_op op = spinand->op_templates->prog_exec;

        op.addr.val = addr;

        return op;
}

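/*
 * Register accesses bounce through spinand->scratchbuf rather than a
 * stack buffer: the spi-mem layer requires the buffers placed in a
 * spi_mem_op to be DMA-able.
 */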
int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
        struct spi_mem_op op = SPINAND_OP(spinand, get_feature,
                                          reg, spinand->scratchbuf);
        int ret;

        ret = spi_mem_exec_op(spinand->spimem, &op);
        if (ret)
                return ret;

        *val = *spinand->scratchbuf;
        return 0;
}

int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
        struct spi_mem_op op = SPINAND_OP(spinand, set_feature,
                                          reg, spinand->scratchbuf);

        *spinand->scratchbuf = val;
        return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
        return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
        struct nand_device *nand = spinand_to_nand(spinand);

        if (WARN_ON(spinand->cur_target < 0 ||
                    spinand->cur_target >= nand->memorg.ntargets))
                return -EINVAL;

        *cfg = spinand->cfg_cache[spinand->cur_target];
        return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        int ret;

        if (WARN_ON(spinand->cur_target < 0 ||
                    spinand->cur_target >= nand->memorg.ntargets))
                return -EINVAL;

        if (spinand->cfg_cache[spinand->cur_target] == cfg)
                return 0;

        ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
        if (ret)
                return ret;

        spinand->cfg_cache[spinand->cur_target] = cfg;
        return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
        int ret;
        u8 cfg;

        ret = spinand_get_cfg(spinand, &cfg);
        if (ret)
                return ret;

        cfg &= ~mask;
        cfg |= val;

        return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        int ret;

        if (WARN_ON(target >= nand->memorg.ntargets))
                return -EINVAL;

        if (spinand->cur_target == target)
                return 0;

        if (nand->memorg.ntargets == 1) {
                spinand->cur_target = target;
                return 0;
        }

        ret = spinand->select_target(spinand, target);
        if (ret)
                return ret;

        spinand->cur_target = target;
        return 0;
}

static int spinand_read_cfg(struct spinand_device *spinand)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        unsigned int target;
        int ret;

        for (target = 0; target < nand->memorg.ntargets; target++) {
                ret = spinand_select_target(spinand, target);
                if (ret)
                        return ret;

                /*
                 * We use spinand_read_reg_op() instead of spinand_get_cfg()
                 * here to bypass the config cache.
                 */
                ret = spinand_read_reg_op(spinand, REG_CFG,
                                          &spinand->cfg_cache[target]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        struct device *dev = &spinand->spimem->spi->dev;

        spinand->cfg_cache = devm_kcalloc(dev,
                                          nand->memorg.ntargets,
                                          sizeof(*spinand->cfg_cache),
                                          GFP_KERNEL);
        if (!spinand->cfg_cache)
                return -ENOMEM;

        return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand,
                                    bool enable)
{
        return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
                               enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
                              bool enable)
{
        return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
                               enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_cont_read_enable(struct spinand_device *spinand,
                                    bool enable)
{
        return spinand->set_cont_read(spinand, enable);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
        struct nand_device *nand = spinand_to_nand(spinand);

        if (spinand->eccinfo.get_status)
                return spinand->eccinfo.get_status(spinand, status);

        switch (status & STATUS_ECC_MASK) {
        case STATUS_ECC_NO_BITFLIPS:
                return 0;

        case STATUS_ECC_HAS_BITFLIPS:
                /*
                 * We have no way to know exactly how many bitflips have been
                 * fixed, so let's return the maximum possible value so that
                 * wear-leveling layers move the data immediately.
                 */
                return nanddev_get_ecc_conf(nand)->strength;

        case STATUS_ECC_UNCOR_ERROR:
                return -EBADMSG;

        default:
                break;
        }

        return -EINVAL;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
                                       struct mtd_oob_region *region)
{
        return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
                                        struct mtd_oob_region *region)
{
        if (section)
                return -ERANGE;

        /* Reserve 2 bytes for the BBM. */
        region->offset = 2;
        region->length = 62;

        return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
        .ecc = spinand_noecc_ooblayout_ecc,
        .free = spinand_noecc_ooblayout_free,
};

static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
        struct spinand_device *spinand = nand_to_spinand(nand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        struct spinand_ondie_ecc_conf *engine_conf;

        nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
        nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
        nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

        engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
        if (!engine_conf)
                return -ENOMEM;

        nand->ecc.ctx.priv = engine_conf;

        if (spinand->eccinfo.ooblayout)
                mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
        else
                mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

        return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
        kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
                                            struct nand_page_io_req *req)
{
        struct spinand_device *spinand = nand_to_spinand(nand);
        bool enable = (req->mode != MTD_OPS_RAW);

        if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
                return -EOPNOTSUPP;

        memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

        /* Only enable or disable the engine */
        return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
                                           struct nand_page_io_req *req)
{
        struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
        struct spinand_device *spinand = nand_to_spinand(nand);
        struct mtd_info *mtd = spinand_to_mtd(spinand);
        int ret;

        if (req->mode == MTD_OPS_RAW)
                return 0;

        /* Nothing to do when finishing a page write */
        if (req->type == NAND_PAGE_WRITE)
                return 0;

        /* Finish a page read: check the status, report errors/bitflips */
        ret = spinand_check_ecc_status(spinand, engine_conf->status);
        if (ret == -EBADMSG) {
                mtd->ecc_stats.failed++;
        } else if (ret > 0) {
                unsigned int pages;

                /*
                 * Continuous reads don't give us per-page details, so we
                 * may exaggerate the actual number of corrected bitflips.
                 */
                if (!req->continuous)
                        pages = 1;
                else
                        pages = req->datalen / nanddev_page_size(nand);

                mtd->ecc_stats.corrected += ret * pages;
        }

        return ret;
}

static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
        .init_ctx = spinand_ondie_ecc_init_ctx,
        .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
        .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
        .finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
        .ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
        struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

        if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
            engine_conf)
                engine_conf->status = status;
}

int spinand_write_enable_op(struct spinand_device *spinand)
{
        struct spi_mem_op op = SPINAND_OP(spinand, wr_en);

        return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
                                const struct nand_page_io_req *req)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        unsigned int row = nanddev_pos_to_row(nand, &req->pos);
        struct spi_mem_op op = SPINAND_OP(spinand, page_read, row);

        return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
                                      const struct nand_page_io_req *req)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        struct mtd_info *mtd = spinand_to_mtd(spinand);
        struct spi_mem_dirmap_desc *rdesc;
        unsigned int nbytes = 0;
        void *buf = NULL;
        u16 column = 0;
        ssize_t ret;

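        /*
         * Data is always staged in spinand->databuf/oobbuf: whole pages are
         * read from the cache (back-to-back when the read is continuous),
         * then only the requested chunks are copied out below.
         */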
        if (req->datalen) {
                buf = spinand->databuf;
                if (!req->continuous)
                        nbytes = nanddev_page_size(nand);
                else
                        nbytes = round_up(req->dataoffs + req->datalen,
                                          nanddev_page_size(nand));
                column = 0;
        }

        if (req->ooblen) {
                nbytes += nanddev_per_page_oobsize(nand);
                if (!buf) {
                        buf = spinand->oobbuf;
                        column = nanddev_page_size(nand);
                }
        }

        if (req->mode == MTD_OPS_RAW)
                rdesc = spinand->dirmaps[req->pos.plane].rdesc;
        else
                rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

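        /*
         * On dual-plane devices, the plane is encoded in the bit right
         * above the column address.
         */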
        if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
                column |= req->pos.plane << fls(nanddev_page_size(nand));

        while (nbytes) {
                ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
                if (ret < 0)
                        return ret;

                if (!ret || ret > nbytes)
                        return -EIO;

                nbytes -= ret;
                column += ret;
                buf += ret;

                /*
                 * Dirmap accesses are allowed to toggle the CS.
                 * Toggling the CS during a continuous read is forbidden.
                 */
                if (nbytes && req->continuous) {
                        /*
                         * A SPI controller with broken continuous read
                         * support was detected. Disable future use of
                         * continuous reads and return -EAGAIN so the read
                         * is retried in regular mode.
                         */
                        spinand->cont_read_possible = false;
                        return -EAGAIN;
                }
        }

        if (req->datalen)
                memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
                       req->datalen);

        if (req->ooblen) {
                if (req->mode == MTD_OPS_AUTO_OOB)
                        mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
                                                    spinand->oobbuf,
                                                    req->ooboffs,
                                                    req->ooblen);
                else
                        memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
                               req->ooblen);
        }

        return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
                                     const struct nand_page_io_req *req)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        struct mtd_info *mtd = spinand_to_mtd(spinand);
        struct spi_mem_dirmap_desc *wdesc;
        unsigned int nbytes, column = 0;
        void *buf = spinand->databuf;
        ssize_t ret;

        /*
         * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
         * the cache content to 0xFF (depends on vendor implementation), so we
         * must fill the page cache entirely even if we only want to program
         * the data portion of the page, otherwise we might corrupt the BBM or
         * user data previously programmed in OOB area.
         *
         * Only reset the data buffer manually, the OOB buffer is prepared by
         * ECC engines ->prepare_io_req() callback.
         */
        nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
        memset(spinand->databuf, 0xff, nanddev_page_size(nand));

        if (req->datalen)
                memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
                       req->datalen);

        if (req->ooblen) {
                if (req->mode == MTD_OPS_AUTO_OOB)
                        mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
                                                    spinand->oobbuf,
                                                    req->ooboffs,
                                                    req->ooblen);
                else
                        memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
                               req->ooblen);
        }

        if (req->mode == MTD_OPS_RAW)
                wdesc = spinand->dirmaps[req->pos.plane].wdesc;
        else
                wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

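        /* Same plane encoding as in the read path. */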
        if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
                column |= req->pos.plane << fls(nanddev_page_size(nand));

        while (nbytes) {
                ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
                if (ret < 0)
                        return ret;

                if (!ret || ret > nbytes)
                        return -EIO;

                nbytes -= ret;
                column += ret;
                buf += ret;
        }

        return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
                              const struct nand_page_io_req *req)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        unsigned int row = nanddev_pos_to_row(nand, &req->pos);
        struct spi_mem_op op = SPINAND_OP(spinand, prog_exec, row);

        return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
                            const struct nand_pos *pos)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        unsigned int row = nanddev_pos_to_row(nand, pos);
        struct spi_mem_op op = SPINAND_OP(spinand, blk_erase, row);

        return spi_mem_exec_op(spinand->spimem, &op);
}

/**
 * spinand_wait() - Poll memory device status
 * @spinand: the spinand device
 * @initial_delay_us: delay in us before starting to poll
 * @poll_delay_us: time to sleep between reads in us
 * @s: pointer to the variable in which to store the value of REG_STATUS
 *
 * This function polls the status register (REG_STATUS) and returns when
 * the STATUS_BUSY bit is cleared or when the timeout has expired.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
                 unsigned long poll_delay_us, u8 *s)
{
        struct spi_mem_op op = SPINAND_OP(spinand, get_feature,
                                          REG_STATUS, spinand->scratchbuf);
        u8 status;
        int ret;

        ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
                                  initial_delay_us,
                                  poll_delay_us,
                                  SPINAND_WAITRDY_TIMEOUT_MS);
        if (ret)
                return ret;

        status = *spinand->scratchbuf;
        if (!(status & STATUS_BUSY))
                goto out;

        /*
         * Extra read, just in case the STATUS_BUSY bit has changed
         * since our last check
         */
        ret = spinand_read_status(spinand, &status);
        if (ret)
                return ret;

out:
        if (s)
                *s = status;

        return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
                              u8 ndummy, u8 *buf)
{
        struct spi_mem_op op = SPINAND_OP(spinand, readid,
                                          naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
        int ret;

        ret = spi_mem_exec_op(spinand->spimem, &op);
        if (!ret)
                memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

        return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
        struct spi_mem_op op = SPINAND_OP(spinand, reset);
        int ret;

        ret = spi_mem_exec_op(spinand->spimem, &op);
        if (ret)
                return ret;

        return spinand_wait(spinand,
                            SPINAND_RESET_INITIAL_DELAY_US,
                            SPINAND_RESET_POLL_DELAY_US,
                            NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
        return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

/**
 * spinand_read_page() - Read a page
 * @spinand: the spinand device
 * @req: the I/O request
 *
 * Return: 0 or a positive number of bitflips corrected on success.
 * A negative error code otherwise.
 */
int spinand_read_page(struct spinand_device *spinand,
                      const struct nand_page_io_req *req)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        u8 status;
        int ret;

        ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
        if (ret)
                return ret;

        ret = spinand_load_page_op(spinand, req);
        if (ret)
                return ret;

        ret = spinand_wait(spinand,
                           SPINAND_READ_INITIAL_DELAY_US,
                           SPINAND_READ_POLL_DELAY_US,
                           &status);
        if (ret < 0)
                return ret;

        spinand_ondie_ecc_save_status(nand, status);

        ret = spinand_read_from_cache_op(spinand, req);
        if (ret)
                return ret;

        return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

/**
 * spinand_write_page() - Write a page
 * @spinand: the spinand device
 * @req: the I/O request
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_write_page(struct spinand_device *spinand,
                       const struct nand_page_io_req *req)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        u8 status;
        int ret;

        ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
        if (ret)
                return ret;

        ret = spinand_write_enable_op(spinand);
        if (ret)
                return ret;

        ret = spinand_write_to_cache_op(spinand, req);
        if (ret)
                return ret;

        ret = spinand_program_op(spinand, req);
        if (ret)
                return ret;

        ret = spinand_wait(spinand,
                           SPINAND_WRITE_INITIAL_DELAY_US,
                           SPINAND_WRITE_POLL_DELAY_US,
                           &status);
        if (ret)
                return ret;

        if (status & STATUS_PROG_FAILED)
                return -EIO;

        return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
                                         struct mtd_oob_ops *ops,
                                         unsigned int *max_bitflips)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct mtd_ecc_stats old_stats;
        struct nand_io_iter iter;
        bool disable_ecc = false;
        bool ecc_failed = false;
        unsigned int retry_mode = 0;
        int ret;

        old_stats = mtd->ecc_stats;

        if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
                disable_ecc = true;

        nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
                if (disable_ecc)
                        iter.req.mode = MTD_OPS_RAW;

                ret = spinand_select_target(spinand, iter.req.pos.target);
                if (ret)
                        break;

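        /*
         * On an uncorrectable error, step through the vendor-specific read
         * retry modes (if any) until the page reads cleanly or all modes
         * have been tried.
         */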
read_retry:
                ret = spinand_read_page(spinand, &iter.req);
                if (ret < 0 && ret != -EBADMSG)
                        break;

                if (ret == -EBADMSG && spinand->set_read_retry) {
                        if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
                                ret = spinand->set_read_retry(spinand, retry_mode);
                                if (ret < 0) {
                                        spinand->set_read_retry(spinand, 0);
                                        return ret;
                                }

                                /* Reset ecc_stats; retry */
                                mtd->ecc_stats = old_stats;
                                goto read_retry;
                        } else {
                                /* No more retry modes; real failure */
                                ecc_failed = true;
                        }
                } else if (ret == -EBADMSG) {
                        ecc_failed = true;
                } else {
                        *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
                }

                ret = 0;
                ops->retlen += iter.req.datalen;
                ops->oobretlen += iter.req.ooblen;

                /* Reset to retry mode 0 */
                if (retry_mode) {
                        retry_mode = 0;
                        ret = spinand->set_read_retry(spinand, retry_mode);
                        if (ret < 0)
                                return ret;
                }
        }

        if (ecc_failed && !ret)
                ret = -EBADMSG;

        return ret;
}

static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
                                            struct mtd_oob_ops *ops,
                                            unsigned int *max_bitflips)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_io_iter iter;
        u8 status;
        int ret;

        ret = spinand_cont_read_enable(spinand, true);
        if (ret)
                return ret;

        /*
         * The cache is divided into two halves. While one half of the cache has
         * the requested data, the other half is loaded with the next chunk of data.
         * Therefore, the host can read out the data continuously from page to page.
 * Each data read must be a multiple of 4 bytes, and full pages should be read;
         * otherwise, the data output might get out of sequence from one read command
         * to another.
         */
        nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
                ret = spinand_select_target(spinand, iter.req.pos.target);
                if (ret)
                        goto end_cont_read;

                ret = nand_ecc_prepare_io_req(nand, &iter.req);
                if (ret)
                        goto end_cont_read;

                ret = spinand_load_page_op(spinand, &iter.req);
                if (ret)
                        goto end_cont_read;

                ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
                                   SPINAND_READ_POLL_DELAY_US, NULL);
                if (ret < 0)
                        goto end_cont_read;

                ret = spinand_read_from_cache_op(spinand, &iter.req);
                if (ret)
                        goto end_cont_read;

                ops->retlen += iter.req.datalen;

                ret = spinand_read_status(spinand, &status);
                if (ret)
                        goto end_cont_read;

                spinand_ondie_ecc_save_status(nand, status);

                ret = nand_ecc_finish_io_req(nand, &iter.req);
                if (ret < 0)
                        goto end_cont_read;

                *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
                ret = 0;
        }

end_cont_read:
        /*
         * Once all the data has been read out, the host can either pull CS#
         * high and wait for tRST or manually clear the bit in the configuration
         * register to terminate the continuous read operation. We have no
         * guarantee the SPI controller drivers will effectively deassert the CS
         * when we expect them to, so take the register based approach.
         */
        spinand_cont_read_enable(spinand, false);

        return ret;
}

static void spinand_cont_read_init(struct spinand_device *spinand)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;

        /* OOBs cannot be retrieved, so external/on-host ECC engines won't work */
        if (spinand->set_cont_read &&
            (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
             engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
                spinand->cont_read_possible = true;

                /*
                 * Ensure continuous read is disabled on probe.
                 * Some devices retain this state across soft reset,
                 * which leaves the OOB area inaccessible and results
                 * in false positive returns from spinand_isbad().
                 */
                spinand_cont_read_enable(spinand, false);
        }
}

static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
                                  struct mtd_oob_ops *ops)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct spinand_device *spinand = nand_to_spinand(nand);
        struct nand_pos start_pos, end_pos;

        if (!spinand->cont_read_possible)
                return false;

        /* OOBs won't be retrieved */
        if (ops->ooblen || ops->oobbuf)
                return false;

        nanddev_offs_to_pos(nand, from, &start_pos);
        nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);

        /*
         * Continuous reads never cross LUN boundaries. Some devices don't
         * support crossing plane boundaries, and some don't even support
         * crossing block boundaries. The common case being reads through
         * UBI, we will very rarely read two consecutive blocks or more, so
         * it is safer and easier (can be improved) to only enable continuous
         * reads when reading within the same erase block.
         */
        if (start_pos.target != end_pos.target ||
            start_pos.plane != end_pos.plane ||
            start_pos.eraseblock != end_pos.eraseblock)
                return false;

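        /* Only use continuous reads when at least two pages are covered. */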
        return start_pos.page < end_pos.page;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct mtd_ecc_stats old_stats;
        unsigned int max_bitflips = 0;
        int ret;

        mutex_lock(&spinand->lock);

        old_stats = mtd->ecc_stats;

        if (spinand_use_cont_read(mtd, from, ops)) {
                ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
                if (ret == -EAGAIN && !spinand->cont_read_possible) {
                        /*
                         * A SPI controller with broken continuous read
                         * support was detected (see
                         * spinand_read_from_cache_op()); repeat the read
                         * in regular mode.
                         */
                        ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
                }
        } else {
                ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
        }

        if (ops->stats) {
                ops->stats->uncorrectable_errors +=
                        mtd->ecc_stats.failed - old_stats.failed;
                ops->stats->corrected_bitflips +=
                        mtd->ecc_stats.corrected - old_stats.corrected;
        }

        mutex_unlock(&spinand->lock);

        return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
                             struct mtd_oob_ops *ops)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_io_iter iter;
        bool disable_ecc = false;
        int ret = 0;

        if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
                disable_ecc = true;

        mutex_lock(&spinand->lock);

        nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
                if (disable_ecc)
                        iter.req.mode = MTD_OPS_RAW;

                ret = spinand_select_target(spinand, iter.req.pos.target);
                if (ret)
                        break;

                ret = spinand_write_page(spinand, &iter.req);
                if (ret)
                        break;

                ops->retlen += iter.req.datalen;
                ops->oobretlen += iter.req.ooblen;
        }

        mutex_unlock(&spinand->lock);

        return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
        struct spinand_device *spinand = nand_to_spinand(nand);
        u8 marker[2] = { };
        struct nand_page_io_req req = {
                .pos = *pos,
                .ooblen = sizeof(marker),
                .ooboffs = 0,
                .oobbuf.in = marker,
                .mode = MTD_OPS_RAW,
        };
        int ret;

        spinand_select_target(spinand, pos->target);

        ret = spinand_read_page(spinand, &req);
        if (ret == -EOPNOTSUPP) {
                /* Retry with ECC in case raw access is not supported */
                req.mode = MTD_OPS_PLACE_OOB;
                spinand_read_page(spinand, &req);
        }

        if (marker[0] != 0xff || marker[1] != 0xff)
                return true;

        return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct spinand_device *spinand = nand_to_spinand(nand);
        struct nand_pos pos;
        int ret;

        nanddev_offs_to_pos(nand, offs, &pos);
        mutex_lock(&spinand->lock);
        ret = nanddev_isbad(nand, &pos);
        mutex_unlock(&spinand->lock);

        return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
        struct spinand_device *spinand = nand_to_spinand(nand);
        u8 marker[2] = { };
        struct nand_page_io_req req = {
                .pos = *pos,
                .ooboffs = 0,
                .ooblen = sizeof(marker),
                .oobbuf.out = marker,
                .mode = MTD_OPS_RAW,
        };
        int ret;

        ret = spinand_select_target(spinand, pos->target);
        if (ret)
                return ret;

        ret = spinand_write_page(spinand, &req);
        if (ret == -EOPNOTSUPP) {
                /* Retry with ECC in case raw access is not supported */
                req.mode = MTD_OPS_PLACE_OOB;
                ret = spinand_write_page(spinand, &req);
        }

        return ret;
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct spinand_device *spinand = nand_to_spinand(nand);
        struct nand_pos pos;
        int ret;

        nanddev_offs_to_pos(nand, offs, &pos);
        mutex_lock(&spinand->lock);
        ret = nanddev_markbad(nand, &pos);
        mutex_unlock(&spinand->lock);

        return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
        struct spinand_device *spinand = nand_to_spinand(nand);
        u8 status;
        int ret;

        ret = spinand_select_target(spinand, pos->target);
        if (ret)
                return ret;

        ret = spinand_write_enable_op(spinand);
        if (ret)
                return ret;

        ret = spinand_erase_op(spinand, pos);
        if (ret)
                return ret;

        ret = spinand_wait(spinand,
                           SPINAND_ERASE_INITIAL_DELAY_US,
                           SPINAND_ERASE_POLL_DELAY_US,
                           &status);

        if (!ret && (status & STATUS_ERASE_FAILED))
                ret = -EIO;

        return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
                             struct erase_info *einfo)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        int ret;

        mutex_lock(&spinand->lock);
        ret = nanddev_mtd_erase(mtd, einfo);
        mutex_unlock(&spinand->lock);

        return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_pos pos;
        int ret;

        nanddev_offs_to_pos(nand, offs, &pos);
        mutex_lock(&spinand->lock);
        ret = nanddev_isreserved(nand, &pos);
        mutex_unlock(&spinand->lock);

        return ret;
}

static struct spi_mem_dirmap_desc *spinand_create_rdesc(
                                        struct spinand_device *spinand,
                                        struct spi_mem_dirmap_info *info)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        struct spi_mem_dirmap_desc *desc = NULL;

        if (spinand->cont_read_possible) {
                /*
                 * The SPI controller may return an error if info->length
                 * is too large.
                 */
                info->length = nanddev_eraseblock_size(nand);
                desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
                                                  spinand->spimem, info);
        }

        if (IS_ERR_OR_NULL(desc)) {
                /*
                 * Continuous reading is not supported by the flash or its
                 * SPI controller, fall back to regular reading.
                 */
                spinand->cont_read_possible = false;

                info->length = nanddev_page_size(nand) +
                               nanddev_per_page_oobsize(nand);
                desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
                                                  spinand->spimem, info);
        }

        return desc;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
                                 unsigned int plane)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        struct spi_mem_dirmap_info info = { 0 };
        struct spi_mem_dirmap_desc *desc;

        /* The plane number is passed in MSB just above the column address */
        info.offset = plane << fls(nand->memorg.pagesize);

        info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
        info.op_tmpl = *spinand->op_templates->update_cache;
        desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
                                          spinand->spimem, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        spinand->dirmaps[plane].wdesc = desc;

        info.op_tmpl = *spinand->op_templates->read_cache;
        desc = spinand_create_rdesc(spinand, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        spinand->dirmaps[plane].rdesc = desc;

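        /*
         * Only pipelined ECC engines need dedicated descriptors with the
         * data.ecc flag set; for all other engines the raw descriptors can
         * be reused.
         */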
        if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
                spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
                spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

                return 0;
        }

        info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
        info.op_tmpl = *spinand->op_templates->update_cache;
        info.op_tmpl.data.ecc = true;
        desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
                                          spinand->spimem, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        spinand->dirmaps[plane].wdesc_ecc = desc;

        info.op_tmpl = *spinand->op_templates->read_cache;
        info.op_tmpl.data.ecc = true;
        desc = spinand_create_rdesc(spinand, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        spinand->dirmaps[plane].rdesc_ecc = desc;

        return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        int i, ret;

        spinand->dirmaps = devm_kcalloc(&spinand->spimem->spi->dev,
                                        nand->memorg.planes_per_lun,
                                        sizeof(*spinand->dirmaps),
                                        GFP_KERNEL);
        if (!spinand->dirmaps)
                return -ENOMEM;

        for (i = 0; i < nand->memorg.planes_per_lun; i++) {
                ret = spinand_create_dirmap(spinand, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static const struct nand_ops spinand_ops = {
        .erase = spinand_erase,
        .markbad = spinand_markbad,
        .isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
        &alliancememory_spinand_manufacturer,
        &ato_spinand_manufacturer,
        &dosilicon_spinand_manufacturer,
        &esmt_8c_spinand_manufacturer,
        &esmt_c8_spinand_manufacturer,
        &fmsh_spinand_manufacturer,
        &foresee_spinand_manufacturer,
        &gigadevice_spinand_manufacturer,
        &macronix_spinand_manufacturer,
        &micron_spinand_manufacturer,
        &paragon_spinand_manufacturer,
        &skyhigh_spinand_manufacturer,
        &toshiba_spinand_manufacturer,
        &winbond_spinand_manufacturer,
        &xtx_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
                                      enum spinand_readid_method rdid_method)
{
        u8 *id = spinand->id.data;
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
                const struct spinand_manufacturer *manufacturer =
                        spinand_manufacturers[i];

                if (id[0] != manufacturer->id)
                        continue;

                ret = spinand_match_and_init(spinand,
                                             manufacturer->chips,
                                             manufacturer->nchips,
                                             rdid_method);
                if (ret < 0)
                        continue;

                spinand->manufacturer = manufacturer;
                return 0;
        }
        return -EOPNOTSUPP;
}

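/*
 * Chips implement one of three READ_ID layouts: opcode only, opcode
 * followed by an address byte, or opcode followed by a dummy byte. Try
 * them in that order until a manufacturer matches.
 */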
static int spinand_id_detect(struct spinand_device *spinand)
{
        u8 *id = spinand->id.data;
        int ret;

        ret = spinand_read_id_op(spinand, 0, 0, id);
        if (ret)
                return ret;
        ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
        if (!ret)
                return 0;

        ret = spinand_read_id_op(spinand, 1, 0, id);
        if (ret)
                return ret;
        ret = spinand_manufacturer_match(spinand,
                                         SPINAND_READID_METHOD_OPCODE_ADDR);
        if (!ret)
                return 0;

        ret = spinand_read_id_op(spinand, 0, 1, id);
        if (ret)
                return ret;
        ret = spinand_manufacturer_match(spinand,
                                         SPINAND_READID_METHOD_OPCODE_DUMMY);

        return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
        int ret;

        if (spinand->manufacturer->ops->init) {
                ret = spinand->manufacturer->ops->init(spinand);
                if (ret)
                        return ret;
        }

        return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
        /* Release manufacturer private data */
        if (spinand->manufacturer->ops->cleanup)
                return spinand->manufacturer->ops->cleanup(spinand);
}

static bool spinand_op_is_odtr(const struct spi_mem_op *op)
{
        return op->cmd.dtr && op->cmd.buswidth == 8;
}

static void spinand_init_ssdr_templates(struct spinand_device *spinand)
{
        struct spinand_mem_ops *tmpl = &spinand->ssdr_op_templates;

        tmpl->reset = (struct spi_mem_op)SPINAND_RESET_1S_0_0_OP;
        tmpl->readid = (struct spi_mem_op)SPINAND_READID_1S_1S_1S_OP(0, 0, NULL, 0);
        tmpl->wr_en = (struct spi_mem_op)SPINAND_WR_EN_1S_0_0_OP;
        tmpl->wr_dis = (struct spi_mem_op)SPINAND_WR_DIS_1S_0_0_OP;
        tmpl->set_feature = (struct spi_mem_op)SPINAND_SET_FEATURE_1S_1S_1S_OP(0, NULL);
        tmpl->get_feature = (struct spi_mem_op)SPINAND_GET_FEATURE_1S_1S_1S_OP(0, NULL);
        tmpl->blk_erase = (struct spi_mem_op)SPINAND_BLK_ERASE_1S_1S_0_OP(0);
        tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_1S_1S_0_OP(0);
        tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_1S_1S_0_OP(0);
        spinand->op_templates = &spinand->ssdr_op_templates;
        spinand->bus_iface = SSDR;
}

static int spinand_support_vendor_ops(struct spinand_device *spinand,
                                      const struct spinand_info *info,
                                      enum spinand_bus_interface iface)
{
        int i;

        if (!info->vendor_ops)
                return 0;
        /*
         * The vendor ops array is only used to verify that this chip and all
         * its memory operations are supported. If we see patterns emerging,
         * we could ideally name these operations and define them at the SPI
         * NAND core level instead. For now, this only serves as a sanity
         * check.
         */
        for (i = 0; i < info->vendor_ops->nops; i++) {
                const struct spi_mem_op *op = &info->vendor_ops->ops[i];

                if ((iface == SSDR && spinand_op_is_odtr(op)) ||
                    (iface == ODTR && !spinand_op_is_odtr(op)))
                        continue;

                if (!spi_mem_supports_op(spinand->spimem, op))
                        return -EOPNOTSUPP;
        }

        return 0;
}

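/*
 * Build the octal DTR (8D-8D-8D) instruction set. Every core operation must
 * be supported by the controller, otherwise octal DTR cannot be used at all.
 */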
static int spinand_init_odtr_instruction_set(struct spinand_device *spinand)
{
        struct spinand_mem_ops *tmpl = &spinand->odtr_op_templates;

        tmpl->reset = (struct spi_mem_op)SPINAND_RESET_8D_0_0_OP;
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->reset))
                return -EOPNOTSUPP;

        tmpl->readid = (struct spi_mem_op)SPINAND_READID_8D_8D_8D_OP(0, 0, NULL, 0);
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->readid))
                return -EOPNOTSUPP;

        tmpl->wr_en = (struct spi_mem_op)SPINAND_WR_EN_8D_0_0_OP;
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->wr_en))
                return -EOPNOTSUPP;

        tmpl->wr_dis = (struct spi_mem_op)SPINAND_WR_DIS_8D_0_0_OP;
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->wr_dis))
                return -EOPNOTSUPP;

        tmpl->set_feature = (struct spi_mem_op)SPINAND_SET_FEATURE_8D_8D_8D_OP(0, NULL);
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->set_feature))
                return -EOPNOTSUPP;

        tmpl->get_feature = (struct spi_mem_op)SPINAND_GET_FEATURE_8D_8D_8D_OP(0, NULL);
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->get_feature))
                return -EOPNOTSUPP;

        tmpl->blk_erase = (struct spi_mem_op)SPINAND_BLK_ERASE_8D_8D_0_OP(0);
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->blk_erase))
                return -EOPNOTSUPP;

        tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_8D_8D_0_OP(0);
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->page_read))
                return -EOPNOTSUPP;

        tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_8D_8D_0_OP(0);
        if (!spi_mem_supports_op(spinand->spimem, &tmpl->prog_exec))
                return -EOPNOTSUPP;

        return 0;
}

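/*
 * Among the variants matching the requested bus interface and supported by
 * the controller, pick the one transferring a full page + OOB in the least
 * time.
 */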
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand, enum spinand_bus_interface iface,
                          const struct spinand_op_variants *variants)
{
        struct nand_device *nand = spinand_to_nand(spinand);
        const struct spi_mem_op *best_variant = NULL;
        u64 best_op_duration_ns = ULLONG_MAX;
        unsigned int i;

        for (i = 0; i < variants->nops; i++) {
                struct spi_mem_op op = variants->ops[i];
                u64 op_duration_ns = 0;
                unsigned int nbytes;
                int ret;

                if ((iface == SSDR && spinand_op_is_odtr(&op)) ||
                    (iface == ODTR && !spinand_op_is_odtr(&op)))
                        continue;

                nbytes = nanddev_per_page_oobsize(nand) +
                         nanddev_page_size(nand);

                while (nbytes) {
                        op.data.nbytes = nbytes;
                        ret = spi_mem_adjust_op_size(spinand->spimem, &op);
                        if (ret)
                                break;

                        spi_mem_adjust_op_freq(spinand->spimem, &op);

                        if (!spi_mem_supports_op(spinand->spimem, &op))
                                break;

                        nbytes -= op.data.nbytes;

                        op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
                }

                if (!nbytes && op_duration_ns < best_op_duration_ns) {
                        best_op_duration_ns = op_duration_ns;
                        best_variant = &variants->ops[i];
                }
        }

        return best_variant;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *                            entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
                           const struct spinand_info *table,
                           unsigned int table_size,
                           enum spinand_readid_method rdid_method)
{
        u8 *id = spinand->id.data;
        struct nand_device *nand = spinand_to_nand(spinand);
        unsigned int i;
        int ret;

        for (i = 0; i < table_size; i++) {
                const struct spinand_info *info = &table[i];
                const struct spi_mem_op *op;

                if (rdid_method != info->devid.method)
                        continue;

                if (memcmp(id + 1, info->devid.id, info->devid.len))
                        continue;

                nand->memorg = table[i].memorg;
                nanddev_set_ecc_requirements(nand, &table[i].eccreq);
                spinand->eccinfo = table[i].eccinfo;
                spinand->flags = table[i].flags;
                spinand->id.len = 1 + table[i].devid.len;
                spinand->select_target = table[i].select_target;
                spinand->configure_chip = table[i].configure_chip;
                spinand->set_cont_read = table[i].set_cont_read;
                spinand->fact_otp = &table[i].fact_otp;
                spinand->user_otp = &table[i].user_otp;
                spinand->read_retries = table[i].read_retries;
                spinand->set_read_retry = table[i].set_read_retry;

                /* I/O variant selection with single SPI SDR commands */

                op = spinand_select_op_variant(spinand, SSDR,
                                               info->op_variants.read_cache);
                if (!op)
                        return -EOPNOTSUPP;

                spinand->ssdr_op_templates.read_cache = op;

                op = spinand_select_op_variant(spinand, SSDR,
                                               info->op_variants.write_cache);
                if (!op)
                        return -EOPNOTSUPP;

                spinand->ssdr_op_templates.write_cache = op;

                op = spinand_select_op_variant(spinand, SSDR,
                                               info->op_variants.update_cache);
                if (!op)
                        return -EOPNOTSUPP;

                spinand->ssdr_op_templates.update_cache = op;

                ret = spinand_support_vendor_ops(spinand, info, SSDR);
                if (ret)
                        return ret;

                /* I/O variant selection with octal DTR commands (optional) */

                ret = spinand_init_odtr_instruction_set(spinand);
                if (ret)
                        return 0;

                ret = spinand_support_vendor_ops(spinand, info, ODTR);
                if (ret)
                        return 0;

                op = spinand_select_op_variant(spinand, ODTR,
                                               info->op_variants.read_cache);
                spinand->odtr_op_templates.read_cache = op;

                op = spinand_select_op_variant(spinand, ODTR,
                                               info->op_variants.write_cache);
                spinand->odtr_op_templates.write_cache = op;

                op = spinand_select_op_variant(spinand, ODTR,
                                               info->op_variants.update_cache);
                spinand->odtr_op_templates.update_cache = op;

                return 0;
        }

        return -EOPNOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
        struct device *dev = &spinand->spimem->spi->dev;
        struct nand_device *nand = spinand_to_nand(spinand);
        int ret;

        ret = spinand_reset_op(spinand);
        if (ret)
                return ret;

        ret = spinand_id_detect(spinand);
        if (ret) {
                dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
                        spinand->id.data);
                return ret;
        }

        if (nand->memorg.ntargets > 1 && !spinand->select_target) {
                dev_err(dev,
                        "SPI NANDs with more than one die must implement ->select_target()\n");
                return -EINVAL;
        }

        dev_info(dev, "%s SPI NAND was found.\n", spinand->manufacturer->name);
        dev_info(dev,
                 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
                 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
                 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

        return 0;
}

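/*
 * Prefer the octal DTR interface when a full set of cache op templates was
 * selected, and fall back to single SPI SDR otherwise, setting the QE bit
 * if any of the selected cache ops is quad-wide.
 */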
static int spinand_configure_chip(struct spinand_device *spinand)
{
        bool odtr = false, quad_enable = false;
        int ret;

        if (spinand->odtr_op_templates.read_cache &&
            spinand->odtr_op_templates.write_cache &&
            spinand->odtr_op_templates.update_cache)
                odtr = true;

        if (odtr) {
                if (!spinand->configure_chip)
                        goto try_ssdr;

                /*
                 * Switch the chip itself to the octal DTR interface. On
                 * failure, drop the ODTR templates and fall back to single
                 * SDR operation.
                 */
                ret = spinand->configure_chip(spinand, ODTR);
                if (ret) {
                        spinand->odtr_op_templates.read_cache = NULL;
                        spinand->odtr_op_templates.write_cache = NULL;
                        spinand->odtr_op_templates.update_cache = NULL;
                        goto try_ssdr;
                }

                spinand->op_templates = &spinand->odtr_op_templates;
                spinand->bus_iface = ODTR;

                return 0;
        }

try_ssdr:
        if (spinand->flags & SPINAND_HAS_QE_BIT) {
                if (spinand->ssdr_op_templates.read_cache->data.buswidth == 4 ||
                    spinand->ssdr_op_templates.write_cache->data.buswidth == 4 ||
                    spinand->ssdr_op_templates.update_cache->data.buswidth == 4)
                        quad_enable = true;
        }

        ret = spinand_init_quad_enable(spinand, quad_enable);
        if (ret)
                return ret;

        if (spinand->configure_chip) {
                ret = spinand->configure_chip(spinand, SSDR);
                if (ret)
                        return ret;
        }

        return 0;
}

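/*
 * Bring the chip to a usable state: cache the configuration register, leave
 * the OTP area, run manufacturer-specific initialization, select the bus
 * interface and unlock all blocks on every die (they come up locked after
 * power-up).
 */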
static int spinand_init_flash(struct spinand_device *spinand)
{
        struct device *dev = &spinand->spimem->spi->dev;
        struct nand_device *nand = spinand_to_nand(spinand);
        int ret, i;

        ret = spinand_read_cfg(spinand);
        if (ret)
                return ret;

        ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
        if (ret)
                return ret;

        ret = spinand_manufacturer_init(spinand);
        if (ret) {
                dev_err(dev,
                        "Failed to initialize the SPI NAND chip (err = %d)\n",
                        ret);
                return ret;
        }

        ret = spinand_configure_chip(spinand);
        if (ret)
                goto manuf_cleanup;

        /* After power up, all blocks are locked, so unlock them here. */
        for (i = 0; i < nand->memorg.ntargets; i++) {
                ret = spinand_select_target(spinand, i);
                if (ret)
                        goto manuf_cleanup;

                ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
                if (ret)
                        goto manuf_cleanup;
        }

        return 0;

manuf_cleanup:
        spinand_manufacturer_cleanup(spinand);

        return ret;
}

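/*
 * mtd->_resume hook: the chip may have been power-cycled while suspended, so
 * replay the full initialization sequence. Errors cannot be propagated from
 * this void hook, hence the early returns.
 */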
static void spinand_mtd_resume(struct mtd_info *mtd)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        int ret;

        ret = spinand_reset_op(spinand);
        if (ret)
                return;

        ret = spinand_init_flash(spinand);
        if (ret)
                return;

        spinand_ecc_enable(spinand, false);
}

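/* mtd->_suspend hook: bring the bus interface back to single SDR. */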
static int spinand_mtd_suspend(struct mtd_info *mtd)
{
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        int ret;

        /*
         * Return to the SSDR interface in the suspend path to make sure the
         * reset operation is correctly processed upon resume.
         *
         * Note: once back in SSDR mode, every operation but the page helpers
         * (dirmap based I/O accessors) will work. Making page accesses work
         * would require destroying and recreating the dirmaps twice, which
         * would be needlessly costly given that this is just a transitional
         * state.
         */
        if (spinand->bus_iface == ODTR) {
                ret = spinand->configure_chip(spinand, SSDR);
                if (ret)
                        return ret;

                spinand->op_templates = &spinand->ssdr_op_templates;
                spinand->bus_iface = SSDR;
        }

        return 0;
}

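/*
 * Core initialization: allocate the DMA-able buffers, detect the chip,
 * initialize the flash and the generic NAND layer, wire up the mtd_info
 * hooks and create the dirmaps used by the page I/O fast path.
 */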
static int spinand_init(struct spinand_device *spinand)
{
        struct device *dev = &spinand->spimem->spi->dev;
        struct mtd_info *mtd = spinand_to_mtd(spinand);
        struct nand_device *nand = mtd_to_nanddev(mtd);
        int ret;

        /*
         * We need a scratch buffer because the spi_mem interface requires
         * that the buffer passed in spi_mem_op->data.buf be DMA-able.
         */
        spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
        if (!spinand->scratchbuf)
                return -ENOMEM;

        spinand_init_ssdr_templates(spinand);

        ret = spinand_detect(spinand);
        if (ret)
                goto err_free_bufs;

        /*
         * Use kzalloc() instead of devm_kzalloc() here, because some drivers
         * may use this buffer for DMA access: memory allocated with the
         * devm_* helpers is not guaranteed to have DMA-safe alignment.
         */
        spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
                                   GFP_KERNEL);
        if (!spinand->databuf) {
                ret = -ENOMEM;
                goto err_free_bufs;
        }

        spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

        ret = spinand_init_cfg_cache(spinand);
        if (ret)
                goto err_free_bufs;

        ret = spinand_init_flash(spinand);
        if (ret)
                goto err_free_bufs;

        ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
        if (ret)
                goto err_manuf_cleanup;

        /* SPI-NAND default ECC engine is on-die */
        nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
        nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

        spinand_ecc_enable(spinand, false);
        ret = nanddev_ecc_engine_init(nand);
        if (ret)
                goto err_cleanup_nanddev;

        /*
         * Continuous read can only be enabled with an on-die ECC engine, so the
         * ECC initialization must have happened previously.
         */
        spinand_cont_read_init(spinand);

        mtd->_read_oob = spinand_mtd_read;
        mtd->_write_oob = spinand_mtd_write;
        mtd->_block_isbad = spinand_mtd_block_isbad;
        mtd->_block_markbad = spinand_mtd_block_markbad;
        mtd->_block_isreserved = spinand_mtd_block_isreserved;
        mtd->_erase = spinand_mtd_erase;
        mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
        mtd->_suspend = spinand_mtd_suspend;
        mtd->_resume = spinand_mtd_resume;

        if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
                ret = spinand_set_mtd_otp_ops(spinand);
                if (ret)
                        goto err_cleanup_ecc_engine;
        }

        if (nand->ecc.engine) {
                ret = mtd_ooblayout_count_freebytes(mtd);
                if (ret < 0)
                        goto err_cleanup_ecc_engine;

                mtd->oobavail = ret;
        }

        /* Propagate ECC information to mtd_info */
        mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
        mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
        mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

        ret = spinand_create_dirmaps(spinand);
        if (ret) {
                dev_err(dev,
                        "Failed to create direct mappings for read/write operations (err = %d)\n",
                        ret);
                goto err_cleanup_ecc_engine;
        }

        return 0;

err_cleanup_ecc_engine:
        nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
        nanddev_cleanup(nand);

err_manuf_cleanup:
        spinand_manufacturer_cleanup(spinand);

err_free_bufs:
        kfree(spinand->databuf);
        kfree(spinand->scratchbuf);
        return ret;
}

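/* Mirror of spinand_init(): release everything acquired there. */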
static void spinand_cleanup(struct spinand_device *spinand)
{
        struct nand_device *nand = spinand_to_nand(spinand);

        nanddev_ecc_engine_cleanup(nand);
        nanddev_cleanup(nand);
        spinand_manufacturer_cleanup(spinand);
        kfree(spinand->databuf);
        kfree(spinand->scratchbuf);
}

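/*
 * spi-mem probe hook. Binding happens through the generic "spi-nand"
 * compatible (or the matching spi_device_id); for illustration only, a
 * minimal hypothetical device tree node using this driver could look like:
 *
 *	flash@0 {
 *		compatible = "spi-nand";
 *		reg = <0>;
 *		spi-max-frequency = <104000000>;
 *	};
 */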
static int spinand_probe(struct spi_mem *mem)
{
        struct spinand_device *spinand;
        struct mtd_info *mtd;
        int ret;

        spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
                               GFP_KERNEL);
        if (!spinand)
                return -ENOMEM;

        spinand->spimem = mem;
        spi_mem_set_drvdata(mem, spinand);
        spinand_set_of_node(spinand, mem->spi->dev.of_node);
        mutex_init(&spinand->lock);
        mtd = spinand_to_mtd(spinand);
        mtd->dev.parent = &mem->spi->dev;

        ret = spinand_init(spinand);
        if (ret)
                return ret;

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret)
                goto err_spinand_cleanup;

        return 0;

err_spinand_cleanup:
        spinand_cleanup(spinand);

        return ret;
}

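/*
 * spi-mem remove hook: unregister the MTD device first, as this can fail
 * (e.g. when the device is still in use), in which case the chip must be
 * left untouched.
 */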
static int spinand_remove(struct spi_mem *mem)
{
        struct spinand_device *spinand;
        struct mtd_info *mtd;
        int ret;

        spinand = spi_mem_get_drvdata(mem);
        mtd = spinand_to_mtd(spinand);

        ret = mtd_device_unregister(mtd);
        if (ret)
                return ret;

        spinand_cleanup(spinand);

        return 0;
}

static const struct spi_device_id spinand_ids[] = {
        { .name = "spi-nand" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
        { .compatible = "spi-nand" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
        .spidrv = {
                .id_table = spinand_ids,
                .driver = {
                        .name = "spi-nand",
                        .of_match_table = of_match_ptr(spinand_of_ids),
                },
        },
        .probe = spinand_probe,
        .remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");