/* drivers/nvme/target/fcloop.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/*
 * Option identifiers: used both as match_token() values in opt_tokens
 * and as bits in fcloop_ctrl_options->mask.
 */
enum {
        NVMF_OPT_ERR            = 0,            /* unknown/invalid option */
        NVMF_OPT_WWNN           = 1 << 0,       /* node name */
        NVMF_OPT_WWPN           = 1 << 1,       /* port name */
        NVMF_OPT_ROLES          = 1 << 2,       /* port roles */
        NVMF_OPT_FCADDR         = 1 << 3,       /* FC address */
        NVMF_OPT_LPWWNN         = 1 << 4,       /* local port node name */
        NVMF_OPT_LPWWPN         = 1 << 5,       /* local port port name */
};

/* parsed values for the options listed in opt_tokens */
struct fcloop_ctrl_options {
        int                     mask;           /* NVMF_OPT_* bits seen */
        u64                     wwnn;
        u64                     wwpn;
        u32                     roles;
        u32                     fcaddr;
        u64                     lpwwnn;
        u64                     lpwwpn;
};

/* token table shared by fcloop_parse_options()/fcloop_parse_nm_options() */
static const match_table_t opt_tokens = {
        { NVMF_OPT_WWNN,        "wwnn=%s"       },
        { NVMF_OPT_WWPN,        "wwpn=%s"       },
        { NVMF_OPT_ROLES,       "roles=%d"      },
        { NVMF_OPT_FCADDR,      "fcaddr=%x"     },
        { NVMF_OPT_LPWWNN,      "lpwwnn=%s"     },
        { NVMF_OPT_LPWWPN,      "lpwwpn=%s"     },
        { NVMF_OPT_ERR,         NULL            }
};

/*
 * Sanity-check that a substring holds a full-width, "0x"-prefixed hex
 * FC name before it is handed to match_u64().  Returns 0 when valid,
 * -EINVAL otherwise.
 */
static int fcloop_verify_addr(substring_t *s)
{
        size_t len = strnlen(s->from, s->to - s->from + 1);

        if (len != NVME_FC_TRADDR_HEXNAMELEN + 2)
                return -EINVAL;
        if (strncmp(s->from, "0x", 2) != 0)
                return -EINVAL;

        return 0;
}

/*
 * Parse a comma/newline separated option string (see opt_tokens) into
 * @opts.  Returns 0 on success, -ENOMEM on allocation failure, or
 * -EINVAL for a malformed or unknown option.
 */
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                opts->mask |= token;
                switch (token) {
                case NVMF_OPT_WWNN:
                case NVMF_OPT_WWPN:
                case NVMF_OPT_LPWWNN:
                case NVMF_OPT_LPWWPN:
                        /* all four are "0x"-prefixed 64-bit hex names */
                        if (fcloop_verify_addr(args) ||
                            match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        if (token == NVMF_OPT_WWNN)
                                opts->wwnn = token64;
                        else if (token == NVMF_OPT_WWPN)
                                opts->wwpn = token64;
                        else if (token == NVMF_OPT_LPWWNN)
                                opts->lpwwnn = token64;
                        else
                                opts->lpwwpn = token64;
                        break;
                case NVMF_OPT_ROLES:
                        if (match_int(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->roles = token;
                        break;
                case NVMF_OPT_FCADDR:
                        if (match_hex(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->fcaddr = token;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);
        return ret;
}


/*
 * Parse "wwnn=...,wwpn=..." from @buf into @nname/@pname.  Both options
 * are mandatory; returns -EINVAL when either is absent or malformed,
 * -ENOMEM on allocation failure, 0 otherwise.
 */
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        /* sentinel: option not yet seen */
        *nname = -1;
        *pname = -1;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                u64 *dst;

                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                switch (token) {
                case NVMF_OPT_WWNN:
                case NVMF_OPT_WWPN:
                        if (fcloop_verify_addr(args) ||
                            match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        dst = (token == NVMF_OPT_WWNN) ? nname : pname;
                        *dst = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);

        /* both names must have been supplied */
        if (!ret && (*nname == -1 || *pname == -1))
                return -EINVAL;

        return ret;
}


/* required option masks for the configfs create interfaces */
#define LPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
                         NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS    (NVMF_OPT_WWNN | NVMF_OPT_WWPN)


/* fcloop_lock protects the two global port lists below */
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

/* fcloop-private state for a simulated local (host-side) FC port */
struct fcloop_lport {
        struct nvme_fc_local_port *localport;
        struct list_head lport_list;            /* on fcloop_lports */
        refcount_t ref;
};

/* stashed in the nvme_fc_local_port private area */
struct fcloop_lport_priv {
        struct fcloop_lport *lport;
};

/* The port is already being removed, avoid double free */
#define PORT_DELETED    0

/* host-visible remote port half of a simulated initiator/target pair */
struct fcloop_rport {
        struct nvme_fc_remote_port      *remoteport;
        struct nvmet_fc_target_port     *targetport;    /* NULL when target gone */
        struct fcloop_nport             *nport;
        struct fcloop_lport             *lport;
        spinlock_t                      lock;           /* protects ls_list */
        struct list_head                ls_list;        /* deferred LS completions */
        struct work_struct              ls_work;        /* fcloop_rport_lsrqst_work */
        unsigned long                   flags;          /* PORT_DELETED */
};

/* target port half of a simulated initiator/target pair */
struct fcloop_tport {
        struct nvmet_fc_target_port     *targetport;
        struct nvme_fc_remote_port      *remoteport;    /* NULL when host gone */
        struct fcloop_nport             *nport;
        struct fcloop_lport             *lport;
        spinlock_t                      lock;           /* protects ls_list */
        struct list_head                ls_list;        /* deferred LS completions */
        struct work_struct              ls_work;        /* fcloop_tport_lsrqst_work */
        unsigned long                   flags;          /* PORT_DELETED */
};

/* a simulated N_Port: ties an rport and tport to one node/port name */
struct fcloop_nport {
        struct fcloop_rport *rport;
        struct fcloop_tport *tport;
        struct fcloop_lport *lport;
        struct list_head nport_list;            /* on fcloop_nports */
        refcount_t ref;
        u64 node_name;
        u64 port_name;
        u32 port_role;
        u32 port_id;
};

/* tracker for one LS request looped from one side to the other */
struct fcloop_lsreq {
        struct nvmefc_ls_req            *lsreq;
        struct nvmefc_ls_rsp            ls_rsp;
        int                             status;  /* status passed to lsreq->done() */
        struct list_head                ls_list; /* fcloop_rport->ls_list */
};

/* one-shot work item simulating reception of an RSCN */
struct fcloop_rscn {
        struct fcloop_tport             *tport;
        struct work_struct              work;
};

/* initiator-side I/O state machine (fcloop_fcpreq->inistate) */
enum {
        INI_IO_START            = 0,
        INI_IO_ACTIVE           = 1,
        INI_IO_ABORTED          = 2,
        INI_IO_COMPLETED        = 3,
};

/* target-side tracker for one looped-back FCP request */
struct fcloop_fcpreq {
        struct fcloop_tport             *tport;
        struct nvmefc_fcp_req           *fcpreq;        /* NULLed in abort path */
        spinlock_t                      reqlock;        /* protects fcpreq/inistate/active/aborted */
        u16                             status;
        u32                             inistate;       /* INI_IO_* */
        bool                            active;
        bool                            aborted;
        refcount_t                      ref;
        struct work_struct              fcp_rcv_work;
        struct work_struct              abort_rcv_work;
        struct work_struct              tio_done_work;
        struct nvmefc_tgt_fcp_req       tgt_fcp_req;
};

/* initiator-side per-request private data (nvmefc_fcp_req->private) */
struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req           *fcpreq;
        struct fcloop_fcpreq            *tfcp_req;      /* NULLed on completion/abort */
        spinlock_t                      inilock;        /* protects tfcp_req */
};

/* SLAB cache for fcloop_lsreq structures */
static struct kmem_cache *lsreq_cache;

/* map an nvmefc_ls_rsp back to its enclosing fcloop_lsreq tracker */
static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
        return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

/* map an nvmefc_tgt_fcp_req back to its enclosing fcloop_fcpreq tracker */
static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


/*
 * No per-queue state is needed for the loopback; the queue handle is
 * simply the localport itself.
 */
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
                        unsigned int qidx, u16 qsize,
                        void **handle)
{
        *handle = localport;
        return 0;
}

/* nothing to tear down: fcloop_create_queue() allocated no state */
static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
                        unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
        struct fcloop_rport *rport =
                container_of(work, struct fcloop_rport, ls_work);
        struct fcloop_lsreq *tls_req;

        spin_lock(&rport->lock);
        for (;;) {
                tls_req = list_first_entry_or_null(&rport->ls_list,
                                struct fcloop_lsreq, ls_list);
                if (!tls_req)
                        break;

                list_del(&tls_req->ls_list);
                spin_unlock(&rport->lock);

                tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
                /*
                 * callee may free memory containing tls_req.
                 * do not reference lsreq after this.
                 */
                kmem_cache_free(lsreq_cache, tls_req);

                spin_lock(&rport->lock);
        }
        spin_unlock(&rport->lock);
}

/*
 * Host->target LS request.  Allocate a tracker and either loop the
 * request into the target transport or, when no target port is
 * connected, queue a deferred -ECONNREFUSED completion on the rport
 * work item.
 */
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_lsreq *tls_req;
        int ret = 0;

        tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
        if (!tls_req)
                return -ENOMEM;
        tls_req->lsreq = lsreq;
        INIT_LIST_HEAD(&tls_req->ls_list);

        if (!rport->targetport) {
                /* no target connected: complete async from rport work */
                tls_req->status = -ECONNREFUSED;
                spin_lock(&rport->lock);
                list_add_tail(&tls_req->ls_list, &rport->ls_list);
                spin_unlock(&rport->lock);
                queue_work(nvmet_wq, &rport->ls_work);
                return ret;
        }

        tls_req->status = 0;
        ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
                                  &tls_req->ls_rsp,
                                  lsreq->rqstaddr, lsreq->rqstlen);
        /*
         * On error the target never saw the request, so no response
         * (and thus no fcloop_h2t_xmt_ls_rsp) will ever arrive; free
         * the tracker here to avoid leaking it.  This mirrors the
         * error handling in fcloop_t2h_ls_req().
         */
        if (ret)
                kmem_cache_free(lsreq_cache, tls_req);

        return ret;
}

/*
 * Target transmits the response to a host-originated LS.  Copy the
 * response payload into the host's buffer, complete the target-side
 * rsp, then defer the host-side done() callback to the rport work item.
 */
static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
                        struct nvmefc_ls_rsp *lsrsp)
{
        struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;
        struct fcloop_tport *tport = targetport->private;
        struct nvme_fc_remote_port *remoteport = tport->remoteport;
        struct fcloop_rport *rport;

        /* copy no more than either side's buffer can hold */
        memcpy(lsreq->rspaddr, lsrsp->rspbuf,
                ((lsreq->rsplen < lsrsp->rsplen) ?
                                lsreq->rsplen : lsrsp->rsplen));

        lsrsp->done(lsrsp);

        if (!remoteport) {
                /* host side is gone: no done() to deliver, drop tracker */
                kmem_cache_free(lsreq_cache, tls_req);
                return 0;
        }

        /* hand off to the rport work item for the host done() callback */
        rport = remoteport->private;
        spin_lock(&rport->lock);
        list_add_tail(&tls_req->ls_list, &rport->ls_list);
        spin_unlock(&rport->lock);
        queue_work(nvmet_wq, &rport->ls_work);

        return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
        struct fcloop_tport *tport =
                container_of(work, struct fcloop_tport, ls_work);
        struct fcloop_lsreq *tls_req;

        spin_lock(&tport->lock);
        for (;;) {
                tls_req = list_first_entry_or_null(&tport->ls_list,
                                struct fcloop_lsreq, ls_list);
                if (!tls_req)
                        break;

                list_del(&tls_req->ls_list);
                spin_unlock(&tport->lock);

                tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
                /*
                 * callee may free memory containing tls_req.
                 * do not reference lsreq after this.
                 */
                kmem_cache_free(lsreq_cache, tls_req);

                spin_lock(&tport->lock);
        }
        spin_unlock(&tport->lock);
}

/*
 * Target->host LS request.  Allocate a tracker and either loop the
 * request into the host transport or, when the host side is gone,
 * queue a deferred -ECONNREFUSED completion on the tport work item.
 */
static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
                        struct nvmefc_ls_req *lsreq)
{
        struct fcloop_tport *tport = targetport->private;
        struct fcloop_lsreq *tls_req;
        int ret;

        /*
         * hosthandle should be the dst.rport value.
         * hosthandle ignored as fcloop currently is
         * 1:1 tgtport vs remoteport
         */
        tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
        if (!tls_req)
                return -ENOMEM;
        tls_req->lsreq = lsreq;
        INIT_LIST_HEAD(&tls_req->ls_list);

        if (!tport->remoteport) {
                /* no host connected: complete async from tport work */
                tls_req->status = -ECONNREFUSED;
                spin_lock(&tport->lock);
                list_add_tail(&tls_req->ls_list, &tport->ls_list);
                spin_unlock(&tport->lock);
                queue_work(nvmet_wq, &tport->ls_work);
                return 0;
        }

        tls_req->status = 0;
        ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
                                 lsreq->rqstaddr, lsreq->rqstlen);
        /* on error no response will ever come back; free our tracker */
        if (ret)
                kmem_cache_free(lsreq_cache, tls_req);

        return ret;
}

/*
 * Host transmits the response to a target-originated LS.  Mirrors
 * fcloop_h2t_xmt_ls_rsp() but must also cope with the target port
 * having disappeared while the LS was in flight.
 */
static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_rsp *lsrsp)
{
        struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;
        struct fcloop_rport *rport = remoteport->private;
        struct nvmet_fc_target_port *targetport = rport->targetport;
        struct fcloop_tport *tport;
        int ret = 0;

        if (!targetport) {
                /*
                 * The target port is gone. The target doesn't expect any
                 * response anymore and thus lsreq can't be accessed anymore.
                 *
                 * We end up here from delete association exchange:
                 * nvmet_fc_xmt_disconnect_assoc sends an async request.
                 *
                 * Return success when remoteport is still online because this
                 * is what LLDDs do and silently drop the response.  Otherwise,
                 * return with error to signal upper layer to perform the lsrsp
                 * resource cleanup.
                 */
                if (remoteport->port_state == FC_OBJSTATE_ONLINE)
                        lsrsp->done(lsrsp);
                else
                        ret = -ENODEV;

                kmem_cache_free(lsreq_cache, tls_req);
                return ret;
        }

        /* copy no more than either side's buffer can hold */
        memcpy(lsreq->rspaddr, lsrsp->rspbuf,
                ((lsreq->rsplen < lsrsp->rsplen) ?
                                lsreq->rsplen : lsrsp->rsplen));
        lsrsp->done(lsrsp);

        /* hand off to the tport work item for the target done() callback */
        tport = targetport->private;
        spin_lock(&tport->lock);
        list_add_tail(&tls_req->ls_list, &tport->ls_list);
        spin_unlock(&tport->lock);
        queue_work(nvmet_wq, &tport->ls_work);

        return 0;
}

/* intentionally empty: fcloop keeps no per-host resources to release */
static void
fcloop_t2h_host_release(void *hosthandle)
{
        /* host handle ignored for now */
}

/*
 * Report the host's transport address (node/port name) for the
 * connection identified by @hosthandle (the fcloop_rport).
 */
static int
fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
        struct fcloop_rport *rport = hosthandle;
        struct nvme_fc_local_port *localport = rport->lport->localport;

        *wwnn = localport->node_name;
        *wwpn = localport->port_name;
        return 0;
}

/*
 * Simulate reception of an RSCN by converting it into an initiator
 * transport call that rescans the paired remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
        struct fcloop_rscn *rscn =
                container_of(work, struct fcloop_rscn, work);
        struct fcloop_tport *tport = rscn->tport;

        if (tport->remoteport)
                nvme_fc_rescan_remoteport(tport->remoteport);
        kfree(rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
        struct fcloop_rscn *tgt_rscn;

        tgt_rscn = kzalloc_obj(*tgt_rscn);
        if (!tgt_rscn)
                return;

        tgt_rscn->tport = tgtport->private;
        INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

        queue_work(nvmet_wq, &tgt_rscn->work);
}

/* drop a reference on @tfcp_req; frees it when the last one goes away */
static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
        if (refcount_dec_and_test(&tfcp_req->ref))
                kfree(tfcp_req);
}

/*
 * Take an additional reference on @tfcp_req.  Returns 0 (failure) when
 * the refcount has already dropped to zero, non-zero otherwise.
 */
static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
        return refcount_inc_not_zero(&tfcp_req->ref);
}

/*
 * Complete an initiator-side FCP request with @status and drop the
 * original io reference on the target-side tracker.  Either pointer
 * may be NULL (already completed / never linked).
 */
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
                        struct fcloop_fcpreq *tfcp_req, int status)
{
        struct fcloop_ini_fcpreq *inireq;

        if (fcpreq) {
                /* sever the initiator->target link before completing */
                inireq = fcpreq->private;
                spin_lock(&inireq->inilock);
                inireq->tfcp_req = NULL;
                spin_unlock(&inireq->inilock);

                fcpreq->status = status;
                fcpreq->done(fcpreq);
        }

        /* release original io reference on tgt struct */
        if (tfcp_req)
                fcloop_tfcp_req_put(tfcp_req);
}

/* knobs for the io-drop fault-injection facility (see check_for_drop) */
static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK        0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;            /* opcode to drop; -1 disables */
static int drop_instance;               /* first matching io to start dropping at */
static int drop_amount;                 /* how many matching ios to drop */
static int drop_current_cnt;            /* matching ios seen so far */

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
        struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
        struct nvme_command *sqe = &cmdiu->sqe;

        if (drop_opcode == -1)
                return 0;

        pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
                "inst %d start %d amt %d\n",
                __func__, sqe->common.opcode, sqe->fabrics.fctype,
                drop_fabric_opcode ? "y" : "n",
                drop_opcode, drop_current_cnt, drop_instance, drop_amount);

        if ((drop_fabric_opcode &&
             (sqe->common.opcode != nvme_fabrics_command ||
              sqe->fabrics.fctype != drop_opcode)) ||
            (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
                return 0;

        if (++drop_current_cnt >= drop_instance) {
                if (drop_current_cnt >= drop_instance + drop_amount)
                        drop_opcode = -1;
                return 1;
        }

        return 0;
}

/*
 * Work item delivering a host-submitted FCP command to the target
 * transport.  Transitions the io START->ACTIVE under reqlock; bails if
 * the initiator already aborted it, or if the drop fault-injection
 * facility claims it.
 */
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        unsigned long flags;
        int ret = 0;
        bool aborted = false;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        fcpreq = tfcp_req->fcpreq;
        switch (tfcp_req->inistate) {
        case INI_IO_START:
                tfcp_req->inistate = INI_IO_ACTIVE;
                break;
        case INI_IO_ABORTED:
                aborted = true;
                break;
        default:
                /* no other state is legal when this work runs */
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                WARN_ON(1);
                return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(aborted)) {
                /* the abort handler will call fcloop_call_host_done */
                return;
        }

        if (unlikely(check_for_drop(tfcp_req))) {
                /* simulate a lost command: never completed on either side */
                pr_info("%s: dropped command ********\n", __func__);
                return;
        }

        ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
                                   &tfcp_req->tgt_fcp_req,
                                   fcpreq->cmdaddr, fcpreq->cmdlen);
        if (ret)
                fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

/*
 * Work item delivering an initiator abort to the target transport and
 * completing the host side with -ECANCELED.  Runs with the extra
 * reference taken in fcloop_fcp_abort(); every exit path releases it.
 */
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, abort_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        bool completed = false;
        unsigned long flags;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        switch (tfcp_req->inistate) {
        case INI_IO_ABORTED:
                /* break the link so the target done path won't touch it */
                fcpreq = tfcp_req->fcpreq;
                tfcp_req->fcpreq = NULL;
                break;
        case INI_IO_COMPLETED:
                completed = true;
                break;
        default:
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                fcloop_tfcp_req_put(tfcp_req);
                WARN_ON(1);
                return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(completed)) {
                /* remove reference taken in original abort downcall */
                fcloop_tfcp_req_put(tfcp_req);
                return;
        }

        if (tfcp_req->tport->targetport)
                nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                                        &tfcp_req->tgt_fcp_req);

        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
        /* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, tio_done_work);
        struct nvmefc_fcp_req *fcpreq;
        unsigned long flags;

        /* mark completed and snapshot the host request under the lock */
        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->inistate = INI_IO_COMPLETED;
        fcpreq = tfcp_req->fcpreq;
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


/*
 * Initiator submits an FCP command.  Allocate the target-side tracker,
 * link the two sides together and bounce the command to the target
 * transport via a work item.
 */
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;

        if (!rport->targetport)
                return -ECONNREFUSED;

        tfcp_req = kzalloc_obj(*tfcp_req, GFP_ATOMIC);
        if (!tfcp_req)
                return -ENOMEM;

        /* target-side tracker; starts with one reference for this io */
        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
        tfcp_req->inistate = INI_IO_START;
        spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
        INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        refcount_set(&tfcp_req->ref, 1);

        /* link the initiator side to the tracker before queueing */
        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;
        spin_lock_init(&inireq->inilock);

        queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

        return 0;
}

/*
 * Copy @length bytes between the target-side data sg list (@data_sg)
 * and the host-side io sg list (@io_sg), starting @offset bytes into
 * the io list.  Direction depends on @op: WRITEDATA copies io->data,
 * everything else copies data->io.  Both lists are walked entry by
 * entry; callers are responsible for the lists covering the range.
 */
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
                        struct scatterlist *io_sg, u32 offset, u32 length)
{
        void *data_p, *io_p;
        u32 data_len, io_len, tlen;

        io_p = sg_virt(io_sg);
        io_len = io_sg->length;

        /* skip @offset bytes into the io sg list */
        for ( ; offset; ) {
                tlen = min_t(u32, offset, io_len);
                offset -= tlen;
                io_len -= tlen;
                if (!io_len) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;
        }

        data_p = sg_virt(data_sg);
        data_len = data_sg->length;

        /* copy chunk by chunk, bounded by both entries' remaining bytes */
        for ( ; length; ) {
                tlen = min_t(u32, io_len, data_len);
                tlen = min_t(u32, tlen, length);

                if (op == NVMET_FCOP_WRITEDATA)
                        memcpy(data_p, io_p, tlen);
                else
                        memcpy(io_p, data_p, tlen);

                length -= tlen;

                io_len -= tlen;
                if ((!io_len) && (length)) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;

                data_len -= tlen;
                if ((!data_len) && (length)) {
                        data_sg = sg_next(data_sg);
                        data_p = sg_virt(data_sg);
                        data_len = data_sg->length;
                } else
                        data_p += tlen;
        }
}

/*
 * Target transport issues a data-movement or response op for a looped
 * io.  Simulates the wire by memcpy'ing between the two sg lists and
 * completing the op via tgt_fcpreq->done().  Guards against re-entry
 * (active) and a prior target-side abort (aborted) under reqlock.
 */
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        struct nvmefc_fcp_req *fcpreq;
        u32 rsplen = 0, xfrlen = 0;
        int fcp_err = 0, active, aborted;
        u8 op = tgt_fcpreq->op;
        unsigned long flags;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        fcpreq = tfcp_req->fcpreq;
        active = tfcp_req->active;
        aborted = tfcp_req->aborted;
        tfcp_req->active = true;
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(active))
                /* illegal - call while i/o active */
                return -EALREADY;

        if (unlikely(aborted)) {
                /* target transport has aborted i/o prior */
                spin_lock_irqsave(&tfcp_req->reqlock, flags);
                tfcp_req->active = false;
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                tgt_fcpreq->transferred_length = 0;
                tgt_fcpreq->fcp_error = -ECANCELED;
                tgt_fcpreq->done(tgt_fcpreq);
                return 0;
        }

        /*
         * if fcpreq is NULL, the I/O has been aborted (from
         * initiator side). For the target side, act as if all is well
         * but don't actually move data.
         */

        switch (op) {
        case NVMET_FCOP_WRITEDATA:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                break;

        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                if (op == NVMET_FCOP_READDATA)
                        break;

                /* Fall-Thru to RSP handling */
                fallthrough;

        case NVMET_FCOP_RSP:
                if (fcpreq) {
                        /* copy the smaller of the two response buffers */
                        rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
                                        fcpreq->rsplen : tgt_fcpreq->rsplen);
                        memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
                        if (rsplen < tgt_fcpreq->rsplen)
                                fcp_err = -E2BIG;
                        fcpreq->rcv_rsplen = rsplen;
                        fcpreq->status = 0;
                }
                tfcp_req->status = 0;
                break;

        default:
                fcp_err = -EINVAL;
                break;
        }

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->active = false;
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        tgt_fcpreq->transferred_length = xfrlen;
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);

        return 0;
}

/*
 * Target transport aborts a looped io: flag it so later ops fail with
 * -ECANCELED and record an internal error status for completion.
 */
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        unsigned long flags;

        /*
         * mark aborted only in case there were 2 threads in transport
         * (one doing io, other doing abort) and only kills ops posted
         * after the abort request
         */
        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->aborted = true;
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        tfcp_req->status = NVME_SC_INTERNAL;

        /*
         * nothing more to do. If io wasn't active, the transport should
         * immediately call the req_release. If it was active, the op
         * will complete, and the lldd should call req_release.
         */
}

/*
 * Target transport is done with the io; finish the host side from the
 * tio_done work item (which completes the initiator and drops the ref).
 */
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

/* intentionally empty: fcloop does not support aborting h2t LS requests */
static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                                struct nvmefc_ls_req *lsreq)
{
}

/* intentionally empty: fcloop does not support aborting t2h LS requests */
static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
                        void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

/*
 * Initiator-side FCP abort callback from the nvme-fc transport.
 * Takes a reference on the linked target-side request, marks it aborted
 * and, if the io was still in flight, hands completion to the abort
 * work item.
 */
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;
        bool abortio = true;
        unsigned long flags;

        /* grab a reference on the target-side request, if still linked */
        spin_lock(&inireq->inilock);
        tfcp_req = inireq->tfcp_req;
        if (tfcp_req) {
                if (!fcloop_tfcp_req_get(tfcp_req))
                        tfcp_req = NULL;
        }
        spin_unlock(&inireq->inilock);

        if (!tfcp_req) {
                /* abort has already been called */
                goto out_host_done;
        }

        /* break initiator/target relationship for io */
        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
        case INI_IO_ACTIVE:
                tfcp_req->inistate = INI_IO_ABORTED;
                break;
        case INI_IO_COMPLETED:
                /* io already completed to the host; nothing to abort */
                abortio = false;
                break;
        default:
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                WARN_ON(1);
                goto out_host_done;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (abortio)
                /* leave the reference while the work item is scheduled */
                WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
        else  {
                /*
                 * as the io has already had the done callback made,
                 * nothing more to do. So release the reference taken above
                 */
                fcloop_tfcp_req_put(tfcp_req);
        }

        return;

out_host_done:
        /* tfcp_req may be NULL here (already-aborted path) */
        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}

/*
 * Drop a reference on an lport; the final put unlinks it from
 * fcloop_lports and frees it.
 */
static void
fcloop_lport_put(struct fcloop_lport *lport)
{
        unsigned long flags;

        if (!refcount_dec_and_test(&lport->ref))
                return;

        spin_lock_irqsave(&fcloop_lock, flags);
        list_del(&lport->lport_list);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(lport);
}

/* Take an lport reference; returns 0 if the lport is already being torn down. */
static int
fcloop_lport_get(struct fcloop_lport *lport)
{
        return refcount_inc_not_zero(&lport->ref);
}

/*
 * Drop a reference on an nport; the final put unlinks it from
 * fcloop_nports, releases the lport reference it held, and frees it.
 */
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
        unsigned long flags;

        if (!refcount_dec_and_test(&nport->ref))
                return;

        spin_lock_irqsave(&fcloop_lock, flags);
        list_del(&nport->nport_list);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (nport->lport)
                fcloop_lport_put(nport->lport);

        kfree(nport);
}

/* Take an nport reference; returns 0 if the nport is already being torn down. */
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
        return refcount_inc_not_zero(&nport->ref);
}

/*
 * nvme-fc callback once the localport teardown has completed: drop the
 * reference the registered localport held on the backing lport.
 */
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
        struct fcloop_lport_priv *priv = localport->private;

        fcloop_lport_put(priv->lport);
}

/*
 * nvme-fc callback once the remoteport teardown has completed: detach
 * the rport from its nport and drop the nport reference exactly once.
 */
static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct fcloop_rport *rport = remoteport->private;
        bool put_port = false;
        unsigned long flags;

        /* wait for any outstanding LS response work to finish */
        flush_work(&rport->ls_work);

        spin_lock_irqsave(&fcloop_lock, flags);
        /* PORT_DELETED guards against dropping the nport reference twice */
        if (!test_and_set_bit(PORT_DELETED, &rport->flags))
                put_port = true;
        rport->nport->rport = NULL;
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (put_port) {
                WARN_ON(!list_empty(&rport->ls_list));
                fcloop_nport_put(rport->nport);
        }
}

/*
 * nvmet-fc callback once the targetport teardown has completed: detach
 * the tport from its nport and drop the nport reference exactly once.
 */
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct fcloop_tport *tport = targetport->private;
        bool put_port = false;
        unsigned long flags;

        /* wait for any outstanding LS response work to finish */
        flush_work(&tport->ls_work);

        spin_lock_irqsave(&fcloop_lock, flags);
        /* PORT_DELETED guards against dropping the nport reference twice */
        if (!test_and_set_bit(PORT_DELETED, &tport->flags))
                put_port = true;
        tport->nport->tport = NULL;
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (put_port) {
                WARN_ON(!list_empty(&tport->ls_list));
                fcloop_nport_put(tport->nport);
        }
}

/* transport limits advertised by both templates below */
#define FCLOOP_HW_QUEUES                4
#define FCLOOP_SGL_SEGS                 256
#define FCLOOP_DMABOUND_4G              0xFFFFFFFF

/* host (initiator) side template registered with the nvme-fc transport */
static struct nvme_fc_port_template fctemplate = {
        .localport_delete       = fcloop_localport_delete,
        .remoteport_delete      = fcloop_remoteport_delete,
        .create_queue           = fcloop_create_queue,
        .delete_queue           = fcloop_delete_queue,
        .ls_req                 = fcloop_h2t_ls_req,
        .fcp_io                 = fcloop_fcp_req,
        .ls_abort               = fcloop_h2t_ls_abort,
        .fcp_abort              = fcloop_fcp_abort,
        .xmt_ls_rsp             = fcloop_t2h_xmt_ls_rsp,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
        .dma_boundary           = FCLOOP_DMABOUND_4G,
        /* sizes of additional private data for data structures */
        .local_priv_sz          = sizeof(struct fcloop_lport_priv),
        .remote_priv_sz         = sizeof(struct fcloop_rport),
        .fcprqst_priv_sz        = sizeof(struct fcloop_ini_fcpreq),
};

/* target side template registered with the nvmet-fc transport */
static struct nvmet_fc_target_template tgttemplate = {
        .targetport_delete      = fcloop_targetport_delete,
        .xmt_ls_rsp             = fcloop_h2t_xmt_ls_rsp,
        .fcp_op                 = fcloop_fcp_op,
        .fcp_abort              = fcloop_tgt_fcp_abort,
        .fcp_req_release        = fcloop_fcp_req_release,
        .discovery_event        = fcloop_tgt_discovery_evt,
        .ls_req                 = fcloop_t2h_ls_req,
        .ls_abort               = fcloop_t2h_ls_abort,
        .host_release           = fcloop_t2h_host_release,
        .host_traddr            = fcloop_t2h_host_traddr,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
        .dma_boundary           = FCLOOP_DMABOUND_4G,
        /* optional features */
        .target_features        = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz         = sizeof(struct fcloop_tport),
};

/*
 * sysfs 'add_local_port' store handler: parse the port options from
 * buf (all LPORT_OPTS options are required), register a localport with
 * the nvme-fc transport, and link the lport into fcloop_lports.
 * Returns count on success, negative errno on failure.
 */
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_port_info pinfo;
        struct fcloop_ctrl_options *opts;
        struct nvme_fc_local_port *localport;
        struct fcloop_lport *lport;
        struct fcloop_lport_priv *lport_priv;
        unsigned long flags;
        int ret = -ENOMEM;

        lport = kzalloc_obj(*lport);
        if (!lport)
                return -ENOMEM;

        opts = kzalloc_obj(*opts);
        if (!opts)
                goto out_free_lport;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = opts->wwnn;
        pinfo.port_name = opts->wwpn;
        pinfo.port_role = opts->roles;
        pinfo.port_id = opts->fcaddr;

        ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
        if (!ret) {
                /* success */
                lport_priv = localport->private;
                lport_priv->lport = lport;

                lport->localport = localport;
                INIT_LIST_HEAD(&lport->lport_list);
                /* initial reference, dropped via fcloop_localport_delete() */
                refcount_set(&lport->ref, 1);

                spin_lock_irqsave(&fcloop_lock, flags);
                list_add_tail(&lport->lport_list, &fcloop_lports);
                spin_unlock_irqrestore(&fcloop_lock, flags);
        }

out_free_opts:
        kfree(opts);
out_free_lport:
        /* free only if we're going to fail */
        if (ret)
                kfree(lport);

        return ret ? ret : count;
}

/* Unregister the nvme-fc localport backing this lport. */
static int
__localport_unreg(struct fcloop_lport *lport)
{
        return nvme_fc_unregister_localport(lport->localport);
}

/*
 * Find the nport matching node_name/port_name and take a reference on
 * it.  Caller must hold fcloop_lock.  Returns NULL when no match exists
 * or the matching nport is already being torn down.
 */
static struct fcloop_nport *
__fcloop_nport_lookup(u64 node_name, u64 port_name)
{
        struct fcloop_nport *nport;

        list_for_each_entry(nport, &fcloop_nports, nport_list) {
                if (nport->node_name == node_name &&
                    nport->port_name == port_name)
                        return fcloop_nport_get(nport) ? nport : NULL;
        }

        return NULL;
}

/*
 * Locked wrapper around __fcloop_nport_lookup(); returns a referenced
 * nport or NULL.
 */
static struct fcloop_nport *
fcloop_nport_lookup(u64 node_name, u64 port_name)
{
        struct fcloop_nport *nport;
        unsigned long flags;

        spin_lock_irqsave(&fcloop_lock, flags);
        nport = __fcloop_nport_lookup(node_name, port_name);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        return nport;
}

/*
 * Find the lport matching node_name/port_name and take a reference on
 * it.  Caller must hold fcloop_lock.  Returns NULL when no match exists
 * or the matching lport is already being torn down.
 */
static struct fcloop_lport *
__fcloop_lport_lookup(u64 node_name, u64 port_name)
{
        struct fcloop_lport *lport;

        list_for_each_entry(lport, &fcloop_lports, lport_list) {
                if (lport->localport->node_name == node_name &&
                    lport->localport->port_name == port_name)
                        return fcloop_lport_get(lport) ? lport : NULL;
        }

        return NULL;
}

/*
 * Locked wrapper around __fcloop_lport_lookup(); returns a referenced
 * lport or NULL.
 */
static struct fcloop_lport *
fcloop_lport_lookup(u64 node_name, u64 port_name)
{
        struct fcloop_lport *lport;
        unsigned long flags;

        spin_lock_irqsave(&fcloop_lock, flags);
        lport = __fcloop_lport_lookup(node_name, port_name);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        return lport;
}

/*
 * sysfs 'del_local_port' store handler: look up the lport named by the
 * "wwnn,wwpn" options, unregister it from the nvme-fc transport and
 * drop the lookup reference.
 */
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_lport *lport;
        u64 nodename, portname;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        lport = fcloop_lport_lookup(nodename, portname);
        if (!lport)
                return -ENOENT;

        ret = __localport_unreg(lport);
        /* drop the reference taken by the lookup */
        fcloop_lport_put(lport);

        return ret ? ret : count;
}

/*
 * Allocate, or reuse, the nport named by the wwnn/wwpn options in buf.
 * For a remoteport the lpwwnn/lpwwpn options must name an existing
 * lport; its reference is stored in nport->lport (or dropped if one is
 * already attached).  Returns a referenced nport on success, NULL on
 * any parse/lookup/allocation failure.
 */
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
        struct fcloop_nport *newnport, *nport;
        struct fcloop_lport *lport;
        struct fcloop_ctrl_options *opts;
        unsigned long flags;
        u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
        int ret;

        opts = kzalloc_obj(*opts);
        if (!opts)
                return NULL;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & opts_mask) != opts_mask)
                goto out_free_opts;

        /* allocate up front; under the lock we can only link, not sleep */
        newnport = kzalloc_obj(*newnport);
        if (!newnport)
                goto out_free_opts;

        INIT_LIST_HEAD(&newnport->nport_list);
        newnport->node_name = opts->wwnn;
        newnport->port_name = opts->wwpn;
        if (opts->mask & NVMF_OPT_ROLES)
                newnport->port_role = opts->roles;
        if (opts->mask & NVMF_OPT_FCADDR)
                newnport->port_id = opts->fcaddr;
        refcount_set(&newnport->ref, 1);

        spin_lock_irqsave(&fcloop_lock, flags);
        /* an lport with the nport's own names is a misconfiguration */
        lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
        if (lport) {
                /* invalid configuration */
                fcloop_lport_put(lport);
                goto out_free_newnport;
        }

        if (remoteport) {
                /* a remoteport must hang off an existing lport */
                lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
                if (!lport) {
                        /* invalid configuration */
                        goto out_free_newnport;
                }
        }

        nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
        if (nport) {
                if ((remoteport && nport->rport) ||
                    (!remoteport && nport->tport)) {
                        /* invalid configuration */
                        goto out_put_nport;
                }

                /* found existing nport, discard the new nport */
                kfree(newnport);
        } else {
                list_add_tail(&newnport->nport_list, &fcloop_nports);
                nport = newnport;
        }

        /* refresh optional attributes on a reused nport as well */
        if (opts->mask & NVMF_OPT_ROLES)
                nport->port_role = opts->roles;
        if (opts->mask & NVMF_OPT_FCADDR)
                nport->port_id = opts->fcaddr;
        if (lport) {
                /* transfer the lport reference unless one is already held */
                if (!nport->lport)
                        nport->lport = lport;
                else
                        fcloop_lport_put(lport);
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(opts);
        return nport;

out_put_nport:
        if (lport)
                fcloop_lport_put(lport);
        fcloop_nport_put(nport);
out_free_newnport:
        spin_unlock_irqrestore(&fcloop_lock, flags);
        kfree(newnport);
out_free_opts:
        kfree(opts);
        return NULL;
}

/*
 * sysfs 'add_remote_port' store handler: create or reuse an nport and
 * register a remoteport for it on the lport named by lpwwnn/lpwwpn.
 * On success the nport reference taken by fcloop_alloc_nport() is
 * handed to the remoteport and dropped in fcloop_remoteport_delete().
 */
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_rport *rport;
        struct nvme_fc_port_info pinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, true);
        if (!nport)
                return -EIO;

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = nport->node_name;
        pinfo.port_name = nport->port_name;
        pinfo.port_role = nport->port_role;
        pinfo.port_id = nport->port_id;

        ret = nvme_fc_register_remoteport(nport->lport->localport,
                                                &pinfo, &remoteport);
        if (ret || !remoteport) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success: cross-link rport with an already-present targetport */
        rport = remoteport->private;
        rport->remoteport = remoteport;
        rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
        if (nport->tport) {
                nport->tport->remoteport = remoteport;
                nport->tport->lport = nport->lport;
        }
        rport->nport = nport;
        rport->lport = nport->lport;
        nport->rport = rport;
        rport->flags = 0;
        spin_lock_init(&rport->lock);
        INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
        INIT_LIST_HEAD(&rport->ls_list);

        return count;
}


/*
 * Detach the rport from its nport (and from the peer tport, if any).
 * Caller must hold fcloop_lock.  Returns the detached rport, or NULL
 * if none was attached.
 */
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
        struct fcloop_rport *rport;

        lockdep_assert_held(&fcloop_lock);

        rport = nport->rport;
        if (rport && nport->tport)
                nport->tport->remoteport = NULL;
        nport->rport = NULL;

        return rport;
}

/* Unregister the remoteport; nport is unused, kept for caller symmetry. */
static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
        return nvme_fc_unregister_remoteport(rport->remoteport);
}

/*
 * sysfs 'del_remote_port' store handler: look up the nport named by
 * "wwnn,wwpn", unlink its rport under fcloop_lock and unregister the
 * remoteport from the nvme-fc transport.
 */
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport;
        struct fcloop_rport *rport;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        nport = fcloop_nport_lookup(nodename, portname);
        if (!nport)
                return -ENOENT;

        spin_lock_irqsave(&fcloop_lock, flags);
        rport = __unlink_remote_port(nport);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!rport) {
                ret = -ENOENT;
                goto out_nport_put;
        }

        ret = __remoteport_unreg(nport, rport);

out_nport_put:
        /* drop the reference taken by the lookup */
        fcloop_nport_put(nport);

        return ret ? ret : count;
}

/*
 * sysfs 'add_target_port' store handler: create or reuse an nport and
 * register an nvmet-fc targetport for it; cross-link with an
 * already-present rport.  On success the nport reference taken by
 * fcloop_alloc_nport() is handed to the targetport and dropped in
 * fcloop_targetport_delete().
 */
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct nvmet_fc_port_info tinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, false);
        if (!nport)
                return -EIO;

        /*
         * NOTE(review): tinfo is not zeroed; assumes node_name, port_name
         * and port_id are its only fields -- confirm if the struct grows.
         */
        tinfo.node_name = nport->node_name;
        tinfo.port_name = nport->port_name;
        tinfo.port_id = nport->port_id;

        ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
                                                &targetport);
        if (ret) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        tport = targetport->private;
        tport->targetport = targetport;
        tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
        if (nport->rport)
                nport->rport->targetport = targetport;
        tport->nport = nport;
        tport->lport = nport->lport;
        nport->tport = tport;
        tport->flags = 0;
        spin_lock_init(&tport->lock);
        INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
        INIT_LIST_HEAD(&tport->ls_list);

        return count;
}


/*
 * Detach the tport from its nport (and from the peer rport, if any).
 * Caller must hold fcloop_lock.  Returns the detached tport, or NULL
 * if none was attached.
 */
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
        struct fcloop_tport *tport;

        lockdep_assert_held(&fcloop_lock);

        tport = nport->tport;
        if (tport && nport->rport)
                nport->rport->targetport = NULL;
        nport->tport = NULL;

        return tport;
}

/* Unregister the targetport; nport is unused, kept for caller symmetry. */
static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
        return nvmet_fc_unregister_targetport(tport->targetport);
}

/*
 * sysfs 'del_target_port' store handler: look up the nport named by
 * "wwnn,wwpn", unlink its tport under fcloop_lock and unregister the
 * targetport from the nvmet-fc transport.
 */
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        nport = fcloop_nport_lookup(nodename, portname);
        if (!nport)
                return -ENOENT;

        spin_lock_irqsave(&fcloop_lock, flags);
        tport = __unlink_target_port(nport);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!tport) {
                ret = -ENOENT;
                goto out_nport_put;
        }

        ret = __targetport_unreg(nport, tport);

out_nport_put:
        /* drop the reference taken by the lookup */
        fcloop_nport_put(nport);

        return ret ? ret : count;
}

/*
 * sysfs 'set_cmd_drop' store handler: parse "opcode:starting:amount"
 * and arm the command-drop injection state.
 */
static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        unsigned int opcode;
        int starting, amount;

        if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
                return -EBADRQC;

        drop_current_cnt = 0;
        /* bits above the opcode mask select the fabric opcode space */
        drop_fabric_opcode = !!(opcode & ~DROP_OPCODE_MASK);
        drop_opcode = opcode & DROP_OPCODE_MASK;
        drop_instance = starting;
        /*
         * the drop check ends at instance + count, so dropping a single
         * instance means a stored count of 0: store amount - 1.
         */
        drop_amount = amount - 1;

        pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
                "instances\n",
                __func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
                drop_opcode, drop_amount);

        return count;
}


/* write-only (0200) control files exposed on the fcloop "ctl" device */
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);

/* attribute list published through the device's default groups */
static struct attribute *fcloop_dev_attrs[] = {
        &dev_attr_add_local_port.attr,
        &dev_attr_del_local_port.attr,
        &dev_attr_add_remote_port.attr,
        &dev_attr_del_remote_port.attr,
        &dev_attr_add_target_port.attr,
        &dev_attr_del_target_port.attr,
        &dev_attr_set_cmd_drop.attr,
        NULL
};

static const struct attribute_group fclopp_dev_attrs_group = {
        .attrs          = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
        &fclopp_dev_attrs_group,
        NULL,
};

/* the "fcloop" class and the single "ctl" device created under it */
static const struct class fcloop_class = {
        .name = "fcloop",
};
static struct device *fcloop_device;

/*
 * Module init: create the lsreq slab cache, register the "fcloop"
 * class and create the "ctl" device carrying the sysfs control files.
 */
static int __init fcloop_init(void)
{
        int ret;

        lsreq_cache = kmem_cache_create("lsreq_cache",
                                sizeof(struct fcloop_lsreq), 0,
                                0, NULL);
        if (!lsreq_cache)
                return -ENOMEM;

        ret = class_register(&fcloop_class);
        if (ret) {
                pr_err("couldn't register class fcloop\n");
                goto out_destroy_cache;
        }

        fcloop_device = device_create_with_groups(
                                &fcloop_class, NULL, MKDEV(0, 0), NULL,
                                fcloop_dev_attr_groups, "ctl");
        if (IS_ERR(fcloop_device)) {
                pr_err("couldn't create ctl device!\n");
                ret = PTR_ERR(fcloop_device);
                goto out_destroy_class;
        }

        /* extra reference, dropped by put_device() in fcloop_exit() */
        get_device(fcloop_device);

        return 0;

out_destroy_class:
        class_unregister(&fcloop_class);
out_destroy_cache:
        kmem_cache_destroy(lsreq_cache);
        return ret;
}

/*
 * Module unload: tear down every nport (target and remote side) and
 * then every lport.  fcloop_lock is dropped around the unregister
 * calls and re-taken to pick the next list entry.
 */
static void __exit fcloop_exit(void)
{
        struct fcloop_lport *lport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        for (;;) {
                /* a failed get means the nport is mid-teardown; stop */
                nport = list_first_entry_or_null(&fcloop_nports,
                                                typeof(*nport), nport_list);
                if (!nport || !fcloop_nport_get(nport))
                        break;

                tport = __unlink_target_port(nport);
                rport = __unlink_remote_port(nport);

                /* drop the lock across the unregister calls */
                spin_unlock_irqrestore(&fcloop_lock, flags);

                if (tport) {
                        ret = __targetport_unreg(nport, tport);
                        if (ret)
                                pr_warn("%s: Failed deleting target port\n",
                                        __func__);
                }

                if (rport) {
                        ret = __remoteport_unreg(nport, rport);
                        if (ret)
                                pr_warn("%s: Failed deleting remote port\n",
                                        __func__);
                }

                /* drop the reference taken by fcloop_nport_get() above */
                fcloop_nport_put(nport);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        for (;;) {
                lport = list_first_entry_or_null(&fcloop_lports,
                                                typeof(*lport), lport_list);
                if (!lport || !fcloop_lport_get(lport))
                        break;

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __localport_unreg(lport);
                if (ret)
                        pr_warn("%s: Failed deleting local port\n", __func__);

                fcloop_lport_put(lport);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        /* release the extra reference taken in fcloop_init() */
        put_device(fcloop_device);

        device_destroy(&fcloop_class, MKDEV(0, 0));
        class_unregister(&fcloop_class);
        kmem_cache_destroy(lsreq_cache);
}

/* module entry/exit hooks and metadata */
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");