/* root/drivers/mtd/nand/core.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *      Boris Brezillon <boris.brezillon@free-electrons.com>
 *      Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)     "nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region is not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that nand_check_erased_buf function exit before
 * testing the whole buffer if the number of bitflips exceed the
 * bitflips_threshold value.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
        const unsigned char *bitmap = buf;
        int bitflips = 0;
        int weight;

        for (; len && ((uintptr_t)bitmap) % sizeof(long);
             len--, bitmap++) {
                weight = hweight8(*bitmap);
                bitflips += BITS_PER_BYTE - weight;
                if (unlikely(bitflips > bitflips_threshold))
                        return -EBADMSG;
        }

        for (; len >= sizeof(long);
             len -= sizeof(long), bitmap += sizeof(long)) {
                unsigned long d = *((unsigned long *)bitmap);
                if (d == ~0UL)
                        continue;
                weight = hweight_long(d);
                bitflips += BITS_PER_LONG - weight;
                if (unlikely(bitflips > bitflips_threshold))
                        return -EBADMSG;
        }

        for (; len > 0; len--, bitmap++) {
                weight = hweight8(*bitmap);
                bitflips += BITS_PER_BYTE - weight;
                if (unlikely(bitflips > bitflips_threshold))
                        return -EBADMSG;
        }

        return bitflips;
}

/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *                               0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check whether a data buffer, its associated ECC bytes and optional extra
 * OOB bytes all contain the 0xff erased pattern, meaning the underlying
 * region has been erased and is ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms work on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines
 *    report the number of errors per chunk, while the NAND core
 *    infrastructure expects the maximum number of bitflips for the whole
 *    page. This is why this function should always be used on a single
 *    chunk and not on the whole page; update your max_bitflips value after
 *    checking each chunk.
 * 2/ When checking for bitflips in erased pages, check not only the payload
 *    data but also the associated ECC bytes: a user might have programmed
 *    almost all bits to 1 but a few, and such a chunk must not be reported
 *    as erased. Checking the ECC bytes catches this case.
 * 3/ The extraoob argument is optional, and should be used if some of your
 *    OOB data are protected by the ECC engine. It can also be used if you
 *    support subpages and want to attach some extra OOB data to an ECC
 *    chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
                                void *ecc, int ecclen,
                                void *extraoob, int extraooblen,
                                int bitflips_threshold)
{
        void *bufs[] = { data, ecc, extraoob };
        int lens[] = { datalen, ecclen, extraooblen };
        int flips[3];
        int total = 0;
        int i;

        /*
         * Scan the three regions in order, shrinking the remaining threshold
         * by the flips already found so the budget applies to the chunk as a
         * whole.
         */
        for (i = 0; i < 3; i++) {
                flips[i] = nand_check_erased_buf(bufs[i], lens[i],
                                                 bitflips_threshold);
                if (flips[i] < 0)
                        return flips[i];

                bitflips_threshold -= flips[i];
                total += flips[i];
        }

        /* The chunk counts as erased: wipe any stray 0 bits back to 0xff. */
        for (i = 0; i < 3; i++) {
                if (flips[i])
                        memset(bufs[i], 0xff, lens[i]);
        }

        return total;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
        unsigned int entry;
        int status;

        if (mtd_check_expert_analysis_mode())
                return false;

        /* Without a BBT, fall back to asking the chip directly. */
        if (!nanddev_bbt_is_initialized(nand))
                return nand->ops->isbad(nand, pos);

        entry = nanddev_bbt_pos_to_entry(nand, pos);
        status = nanddev_bbt_get_block_status(nand, entry);
        if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
                /* Lazy retrieval: query the chip once and cache the result. */
                status = nand->ops->isbad(nand, pos) ?
                         NAND_BBT_BLOCK_FACTORY_BAD : NAND_BBT_BLOCK_GOOD;
                nanddev_bbt_set_block_status(nand, entry, status);
        }

        return status == NAND_BBT_BLOCK_WORN ||
               status == NAND_BBT_BLOCK_FACTORY_BAD;
}
EXPORT_SYMBOL_GPL(nanddev_isbad);

/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad: call the low-level markbad hook (nand->ops->markbad())
 * and update the BBT if one is available. The MTD bad-block statistics are
 * bumped on success.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        unsigned int entry;
        int ret = 0;

        /* Nothing to do if the block is already flagged bad. */
        if (nanddev_isbad(nand, pos))
                return 0;

        ret = nand->ops->markbad(nand, pos);
        if (ret)
                pr_warn("failed to write BBM to block @%llx (err = %d)\n",
                        nanddev_pos_to_offs(nand, pos), ret);

        /*
         * Record the block as worn in the BBT even if writing the BBM
         * failed; the BBT result then determines the final return value.
         */
        if (nanddev_bbt_is_initialized(nand)) {
                entry = nanddev_bbt_pos_to_entry(nand, pos);
                ret = nanddev_bbt_set_block_status(nand, entry,
                                                   NAND_BBT_BLOCK_WORN);
                if (!ret)
                        ret = nanddev_bbt_update(nand);
        }

        if (!ret)
                mtd->ecc_stats.badblocks++;

        return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
        unsigned int idx;

        /* Reserved blocks are only tracked through the BBT. */
        if (!nanddev_bbt_is_initialized(nand))
                return false;

        idx = nanddev_bbt_pos_to_entry(nand, pos);
        return nanddev_bbt_get_block_status(nand, idx) ==
               NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
        if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
                pr_warn("attempt to erase a bad/reserved block @%llx\n",
                        nanddev_pos_to_offs(nand, pos));
                return -EIO;
        }

        return nand->ops->erase(nand, pos);
}

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * Simple mtd->_erase() implementation iterating over every eraseblock
 * covered by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_pos pos, last;
        int ret;

        /* Convert the byte range into first/last eraseblock positions. */
        nanddev_offs_to_pos(nand, einfo->addr, &pos);
        nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);

        for (; nanddev_pos_cmp(&pos, &last) <= 0;
             nanddev_pos_next_eraseblock(nand, &pos)) {
                ret = nanddev_erase(nand, &pos);
                if (ret) {
                        /* Report where the erase stopped before bailing out. */
                        einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);

/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblock on
 *                                a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Sums the per-LUN
 * worst-case bad-block count over every LUN touched by the [@offs,
 * @offs + @len) region. Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of eraseblocks on a
 * portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_pos pos, end;
        unsigned int max_bb = 0;

        if (!nand->memorg.max_bad_eraseblocks_per_lun)
                return -ENOTSUPP;

        nanddev_offs_to_pos(nand, offs + len, &end);

        /*
         * Account one worst-case budget per LUN covered by the region.
         * (The for-init is the only place @pos needs to be computed; a
         * duplicate nanddev_offs_to_pos() call before the loop was removed.)
         */
        for (nanddev_offs_to_pos(nand, offs, &pos);
             nanddev_pos_cmp(&pos, &end) < 0;
             nanddev_pos_next_lun(nand, &pos))
                max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

        return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);

/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 *
 * Resolves the ECC engine type (user/DT configuration first, chip defaults
 * as fallback) and attaches the matching engine to @nand->ecc.engine.
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
        int type;

        /* Let DT/user configuration override the chip defaults. */
        of_get_nand_ecc_user_config(nand);

        type = nand->ecc.user_conf.engine_type;
        if (type == NAND_ECC_ENGINE_TYPE_INVALID)
                type = nand->ecc.defaults.engine_type;

        switch (type) {
        case NAND_ECC_ENGINE_TYPE_NONE:
                /* No correction requested: nothing to look up. */
                return 0;
        case NAND_ECC_ENGINE_TYPE_SOFT:
                nand->ecc.engine = nand_ecc_get_sw_engine(nand);
                break;
        case NAND_ECC_ENGINE_TYPE_ON_DIE:
                nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
                break;
        case NAND_ECC_ENGINE_TYPE_ON_HOST:
                /* Host engines may not be probed yet: propagate the defer. */
                nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
                if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                break;
        default:
                pr_err("Missing ECC engine type\n");
        }

        /*
         * NOTE(review): a non-NULL ERR_PTR other than -EPROBE_DEFER would
         * slip through this NULL check — confirm whether the getters can
         * return one.
         */
        if (!nand->ecc.engine)
                return -EINVAL;

        return 0;
}

/**
 * nanddev_put_ecc_engine() - Dettach and put the in-use ECC engine
 * @nand: NAND device
 *
 * Releases the reference taken by nanddev_get_ecc_engine(). Only on-host
 * hardware engines hold such a reference; all other engine types need no
 * teardown.
 */
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
        if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
                nand_ecc_put_on_host_hw_engine(nand);

        return 0;
}

/**
 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
 * @nand: NAND device
 *
 * Initializes the ECC context of the attached engine and warns (without
 * failing) when the resulting correction strength does not meet the chip's
 * requirements.
 */
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
        int err;

        /* An engine must have been attached beforehand. */
        if (!nand->ecc.engine)
                return -ENOTSUPP;

        err = nand_ecc_init_ctx(nand);
        if (err)
                return err;

        /* Weak ECC is reported but tolerated. */
        if (!nand_ecc_is_strong_enough(nand))
                pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
                        nand->mtd.name);

        return 0;
}

/**
 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
 * @nand: NAND device
 *
 * Finds a suitable ECC engine and configures it, releasing the engine again
 * if no workable configuration can be reached.
 */
int nanddev_ecc_engine_init(struct nand_device *nand)
{
        int err;

        /* Look for the ECC engine to use */
        err = nanddev_get_ecc_engine(nand);
        if (err) {
                /* Probe deferral is routine; only log real failures. */
                if (err != -EPROBE_DEFER)
                        pr_err("No ECC engine found\n");
                return err;
        }

        /* No ECC engine requested */
        if (!nand->ecc.engine)
                return 0;

        /* Configure the engine: balance user input and chip requirements */
        err = nanddev_find_ecc_configuration(nand);
        if (err) {
                pr_err("No suitable ECC configuration\n");
                nanddev_put_ecc_engine(nand);
        }

        return err;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);

/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 *
 * Tears down the ECC context (when an engine is attached) and drops the
 * engine reference taken by nanddev_ecc_engine_init().
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
        /* The context only exists when an engine was attached. */
        if (nand->ecc.engine)
                nand_ecc_cleanup_ctx(nand);

        nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);

/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
                 struct module *owner)
{
        struct mtd_info *mtd;
        struct nand_memory_organization *memorg;

        /*
         * Validate the arguments before touching @nand: the original code
         * derived mtd/memorg from @nand ahead of this check, which is
         * undefined behavior when @nand is NULL.
         */
        if (!nand || !ops)
                return -EINVAL;

        /* All three hooks are mandatory for the generic helpers. */
        if (!ops->erase || !ops->markbad || !ops->isbad)
                return -EINVAL;

        mtd = nanddev_to_mtd(nand);
        memorg = nanddev_get_memorg(nand);

        /* Every geometry field must be non-zero for the size math below. */
        if (!memorg->bits_per_cell || !memorg->pagesize ||
            !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
            !memorg->planes_per_lun || !memorg->luns_per_target ||
            !memorg->ntargets)
                return -EINVAL;

        /* Precompute the shifts used to build row addresses. */
        nand->rowconv.eraseblock_addr_shift =
                                        fls(memorg->pages_per_eraseblock - 1);
        nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
                                       nand->rowconv.eraseblock_addr_shift;

        nand->ops = ops;

        /* Populate the MTD layer view of the device. */
        mtd->type = memorg->bits_per_cell == 1 ?
                    MTD_NANDFLASH : MTD_MLCNANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;
        mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
        mtd->writesize = memorg->pagesize;
        mtd->writebufsize = memorg->pagesize;
        mtd->oobsize = memorg->oobsize;
        mtd->size = nanddev_size(nand);
        mtd->owner = owner;

        return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);

/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init(): currently that is
 * only the BBT, which is torn down when it was successfully initialized.
 */
void nanddev_cleanup(struct nand_device *nand)
{
        if (!nanddev_bbt_is_initialized(nand))
                return;

        nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

/* Module metadata used when this core is built as a standalone module. */
MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");