#ifndef __NVME_INTERNAL_H__
#define __NVME_INTERNAL_H__
#include "nvme_common.h"
#include "nvme_pci.h"
#include "nvme_intel.h"
#include "nvme_mem.h"
#ifndef __HAIKU__
#include <pthread.h>
#include <sys/user.h>
#else
#include "nvme_platform.h"
#endif
/*
 * Iterate over a BSD LIST while allowing the current element to be
 * removed (or freed) inside the loop body: the successor is cached in
 * "tvar" before the body runs.
 */
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = LIST_FIRST((head)); \
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
(var) = (tvar))
/*
 * Removal-safe traversal of a BSD TAILQ: the next element is captured
 * in "tvar" before the loop body executes, so "var" may be unlinked or
 * freed within the body.
 */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = TAILQ_FIRST((head)); \
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define INTEL_DC_P3X00_DEVID 0x09538086
#define NVME_TIMEOUT_INFINITE UINT64_MAX
#define NVME_INTEL_QUIRK_READ_LATENCY 0x1
#define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
#define NVME_QUIRK_DELAY_BEFORE_CHK_RDY 0x4
#define NVME_QUIRK_DELAY_AFTER_RDY 0x8
/*
 * Maximum number of PRP entries a single tracker can describe.
 * Chosen so that struct nvme_tracker stays exactly 4 KB (see the
 * static assert on struct nvme_tracker below).
 */
#define NVME_MAX_PRP_LIST_ENTRIES (506)
/*
 * Largest transfer (in bytes) describable by one PRP list.
 *
 * The expansion must be parenthesized: the previous definition expanded
 * unparenthesized, so expressions such as "x / NVME_MAX_XFER_SIZE" or
 * "x % NVME_MAX_XFER_SIZE" parsed as "(x / 506) * PAGE_SIZE" — a silent
 * precedence bug for any non-multiplicative use of the macro.
 */
#define NVME_MAX_XFER_SIZE (NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
/* Admin queue sizing: tracker count and submission/completion entries. */
#define NVME_ADMIN_TRACKERS (16)
#define NVME_ADMIN_ENTRIES (128)
/* I/O queue sizing: queue entries and in-flight command trackers. */
#define NVME_IO_ENTRIES (1024U)
#define NVME_IO_TRACKERS (128U)
/* Entries per tracker; used to scale queue depth against tracker count. */
#define NVME_IO_ENTRIES_VS_TRACKERS_RATIO (NVME_IO_ENTRIES / NVME_IO_TRACKERS)
/*
 * Maximum SGL descriptors per tracker — sized, like the PRP list, to fit
 * the 4 KB nvme_tracker (the two share a union there).
 */
#define NVME_MAX_SGL_DESCRIPTORS (253)
/* Number of outstanding Asynchronous Event Requests kept with the controller. */
#define NVME_MAX_ASYNC_EVENTS (8)
/* Default cap on the number of I/O queue pairs. */
#define DEFAULT_MAX_IO_QUEUES (1024)
/* How many times a failed request is retried (see nvme_request.retries). */
#define NVME_MAX_RETRY_COUNT (3)
/*
 * Kind of I/O queue being created/deleted via the admin commands
 * (see nvme_admin_create_ioq() / nvme_admin_delete_ioq()).
 */
enum nvme_io_queue_type {
NVME_IO_QTYPE_INVALID = 0,
NVME_IO_SUBMISSION_QUEUE,
NVME_IO_COMPLETION_QUEUE,
};
/*
 * Discriminator for the union in struct nvme_payload: either one
 * virtually contiguous buffer or a caller-supplied scatter-gather list.
 */
enum nvme_payload_type {
NVME_PAYLOAD_TYPE_INVALID = 0,
NVME_PAYLOAD_TYPE_CONTIG,
NVME_PAYLOAD_TYPE_SGL,
};
/* Capability flag bits stored in struct nvme_ctrlr.flags. */
enum nvme_ctrlr_flags {
/* Controller supports SGL-based data transfer. */
NVME_CTRLR_SGL_SUPPORTED = 0x1,
};
/*
 * Describes the data buffer of a request. Packed to keep
 * struct nvme_request (which embeds it) at its asserted size.
 */
struct __attribute__((packed)) nvme_payload {
union {
/* Virtually contiguous buffer (type == NVME_PAYLOAD_TYPE_CONTIG). */
void *contig;
/* Caller callbacks producing SGL segments (type == NVME_PAYLOAD_TYPE_SGL). */
struct {
nvme_req_reset_sgl_cb reset_sgl_fn;
nvme_req_next_sge_cb next_sge_fn;
void *cb_arg;
} sgl;
} u;
/* Metadata buffer, if any — presumably for end-to-end protection; may be NULL. */
void *md;
/* One of enum nvme_payload_type, selecting the union member above. */
uint8_t type;
};
/*
 * One queued NVMe command plus its completion callback and payload.
 * Requests larger than the controller limit are split: the parent keeps
 * a list of children and aggregates their completion status.
 * 64-byte alignment — presumably to match a cache line.
 */
struct nvme_request {
/* The submission-queue entry to be sent to the device. */
struct nvme_cmd cmd;
struct nvme_payload payload;
/* Retry budget consumed so far (bounded by NVME_MAX_RETRY_COUNT). */
uint8_t retries;
/* Number of outstanding child requests of a split parent. */
uint8_t child_reqs;
uint32_t payload_size;
/* Byte offsets into the payload/metadata for split (child) requests. */
uint32_t payload_offset;
uint32_t md_offset;
/* Completion callback and its argument. */
nvme_cmd_cb cb_fn;
void *cb_arg;
/* Children of a split request, linked via child_tailq. */
TAILQ_HEAD(, nvme_request) children;
TAILQ_ENTRY(nvme_request) child_tailq;
/* Queue pair this request was allocated from / submitted to. */
struct nvme_qpair *qpair;
/* Linkage for the qpair free_req / queued_req lists. */
STAILQ_ENTRY(nvme_request) stailq;
/* Parent of a child request; NULL for top-level requests. */
struct nvme_request *parent;
/* Aggregated completion status reported to the parent's callback. */
struct nvme_cpl parent_status;
} __attribute__((aligned(64)));
/*
 * Helper for synchronous command execution: the completion callback
 * (nvme_request_completion_poll_cb) stores the completion here and sets
 * "done" so the caller can busy-poll for it.
 */
struct nvme_completion_poll_status {
struct nvme_cpl cpl;
bool done;
};
/*
 * State for one outstanding Asynchronous Event Request; the controller
 * keeps NVME_MAX_ASYNC_EVENTS of these in its aer[] array.
 */
struct nvme_async_event_request {
struct nvme_ctrlr *ctrlr;
struct nvme_request *req;
/* Completion of the most recent asynchronous event. */
struct nvme_cpl cpl;
};
/*
 * Per-command DMA tracking structure. Exactly one page (4 KB) so the
 * embedded PRP list / SGL sits at a fixed, Qword-aligned offset — both
 * properties are enforced by the static asserts below. Do not reorder
 * or resize fields without revisiting those asserts.
 */
struct nvme_tracker {
/* Linkage for qpair free_tr / outstanding_tr lists. */
LIST_ENTRY(nvme_tracker) list;
struct nvme_request *req;
#if INTPTR_MAX == INT32_MAX
/* Pad so the layout (and the 4 KB size assert) holds on 32-bit builds. */
int32_t __pad[3];
#elif !defined(INTPTR_MAX)
# error Need definition of INTPTR_MAX!
#endif
/* Command identifier placed in the submission entry. */
uint16_t cid;
uint16_t rsvd1: 15;
/* Non-zero while the tracked command is outstanding. */
uint16_t active: 1;
uint32_t rsvd2;
/* Bus address of u.prp / u.sgl, written into the command's PRP2/SGL field. */
uint64_t prp_sgl_bus_addr;
/* In-tracker PRP list or SGL descriptor array (one of the two is used). */
union {
uint64_t prp[NVME_MAX_PRP_LIST_ENTRIES];
struct nvme_sgl_descriptor sgl[NVME_MAX_SGL_DESCRIPTORS];
} u;
uint64_t rsvd3;
};
nvme_static_assert(sizeof(struct nvme_tracker) == 4096,
"nvme_tracker is not 4K");
nvme_static_assert((offsetof(struct nvme_tracker, u.sgl) & 7) == 0,
"SGL must be Qword aligned");
/*
 * A submission/completion queue pair, its doorbells, and the pools of
 * trackers and requests that feed it. Used for both the admin queue
 * (nvme_ctrlr.adminq) and I/O queues.
 */
struct nvme_qpair {
/* Serializes submission/completion processing on this qpair. */
pthread_mutex_t lock;
/* Memory-mapped submission-tail / completion-head doorbell registers. */
volatile uint32_t *sq_tdbl;
volatile uint32_t *cq_hdbl;
/* Virtual addresses of the submission and completion queue arrays. */
struct nvme_cmd *cmd;
struct nvme_cpl *cpl;
/* Free and in-flight tracker lists (entries come from the tr array). */
LIST_HEAD(, nvme_tracker) free_tr;
LIST_HEAD(, nvme_tracker) outstanding_tr;
/* Number of trackers in tr. */
uint16_t trackers;
struct nvme_tracker *tr;
/* Request pool backing free_req; queued_req holds requests awaiting a tracker. */
struct nvme_request *reqs;
unsigned int num_reqs;
STAILQ_HEAD(, nvme_request) free_req;
STAILQ_HEAD(, nvme_request) queued_req;
/* Queue identifier (0 is the admin queue by NVMe convention — confirm at call sites). */
uint16_t id;
/* Queue depth in entries. */
uint16_t entries;
/* Ring indices for submission tail and completion head. */
uint16_t sq_tail;
uint16_t cq_head;
/* Expected completion phase bit for the current pass over cpl[]. */
uint8_t phase;
bool enabled;
/* True when the submission queue lives in the controller memory buffer. */
bool sq_in_cmb;
uint8_t qprio;
struct nvme_ctrlr *ctrlr;
/* Linkage for ctrlr free_io_qpairs / active_io_qpairs lists. */
TAILQ_ENTRY(nvme_qpair) tailq;
/* Physical (bus) addresses of cmd and cpl, programmed into the device. */
phys_addr_t cmd_bus_addr;
phys_addr_t cpl_bus_addr;
};
/* Per-namespace state cached from the Identify Namespace data. */
struct nvme_ns {
struct nvme_ctrlr *ctrlr;
/* Optimal I/O boundary in bytes; 0 when the device reports none. */
uint32_t stripe_size;
uint32_t sector_size;
/* Metadata bytes per sector. */
uint32_t md_size;
/* End-to-end protection information type. */
uint32_t pi_type;
/* Sector counts derived from max_xfer_size and stripe_size for splitting I/O. */
uint32_t sectors_per_max_io;
uint32_t sectors_per_stripe;
/* Namespace ID. */
uint16_t id;
uint16_t flags;
/* Open reference count. */
int open_count;
};
/*
 * Controller initialization state machine: disable (waiting for
 * CSTS.RDY transitions), re-enable, then ready.
 */
enum nvme_ctrlr_state {
NVME_CTRLR_STATE_INIT = 0,
NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
NVME_CTRLR_STATE_READY
};
/*
 * Per-controller state: mapped registers, queue pairs, namespaces,
 * async-event bookkeeping, and controller-memory-buffer accounting.
 * PAGE_SIZE alignment — presumably so the structure can be backed by
 * whole pages; confirm against the allocator in the .c files.
 */
struct nvme_ctrlr {
/* Memory-mapped controller register block (BAR0). */
volatile struct nvme_registers *regs;
/* Array of I/O queue pairs and the counts governing it. */
struct nvme_qpair *ioq;
unsigned int io_queues;
unsigned int max_io_queues;
unsigned int enabled_io_qpairs;
unsigned int io_qpairs_max_entries;
/* Namespaces attached to this controller. */
unsigned int nr_ns;
struct nvme_ns *ns;
/* Reset-in-progress / unrecoverable-failure flags. */
bool resetting;
bool failed;
/* Capability bits (enum nvme_ctrlr_flags). */
uint64_t flags;
/* Init state machine position and its timeout deadline. */
enum nvme_ctrlr_state state;
uint64_t state_timeout_ms;
/* Support tables indexed by log page / feature ID (8-bit ID space). */
bool log_page_supported[256];
bool feature_supported[256];
struct pci_device *pci_dev;
/* Transfer/paging limits derived from controller capabilities. */
uint32_t max_xfer_size;
uint32_t min_page_size;
/* Doorbell stride in 32-bit words (CAP.DSTRD). */
uint32_t doorbell_stride_u32;
/* Outstanding async event requests and the user notification callback. */
uint32_t num_aers;
struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
nvme_aer_cb aer_cb_fn;
void *aer_cb_arg;
/* Admin queue pair (queue ID 0). */
struct nvme_qpair adminq;
/* Serializes controller-wide operations. */
pthread_mutex_t lock;
/* Identify Controller data and per-namespace identify data array. */
struct nvme_ctrlr_data cdata;
struct nvme_ns_data *nsdata;
TAILQ_HEAD(, nvme_qpair) free_io_qpairs;
TAILQ_HEAD(, nvme_qpair) active_io_qpairs;
struct nvme_ctrlr_opts opts;
/* Controller memory buffer mapping and bump-allocator offset. */
void *cmb_bar_virt_addr;
uint64_t cmb_bar_phys_addr;
uint64_t cmb_size;
uint64_t cmb_current_offset;
/* NVME_*_QUIRK_* bits for this device. */
unsigned int quirks;
/* Linkage in the global controller list. */
LIST_ENTRY(nvme_ctrlr) link;
} __attribute__((aligned(PAGE_SIZE)));
/*
 * Internal API prototypes. Definitions live in the driver's .c files.
 *
 * --- Admin command wrappers (synchronous; return 0 on success) ---
 */
/* Identify Controller into cdata. */
extern int nvme_admin_identify_ctrlr(struct nvme_ctrlr *ctrlr,
struct nvme_ctrlr_data *cdata);
/* Get Features; result value returned through attributes. */
extern int nvme_admin_get_feature(struct nvme_ctrlr *ctrlr,
enum nvme_feat_sel sel,
enum nvme_feat feature,
uint32_t cdw11, uint32_t *attributes);
/* Set Features with optional data buffer; completion dword returned in attributes. */
extern int nvme_admin_set_feature(struct nvme_ctrlr *ctrlr,
bool save,
enum nvme_feat feature,
uint32_t cdw11, uint32_t cdw12,
uint32_t cdw13, uint32_t cdw14, uint32_t cdw15,
void *buf, uint32_t len,
uint32_t *attributes);
/* Format NVM on the given namespace. */
extern int nvme_admin_format_nvm(struct nvme_ctrlr *ctrlr,
unsigned int nsid,
struct nvme_format *format);
/* Get Log Page into payload. */
extern int nvme_admin_get_log_page(struct nvme_ctrlr *ctrlr,
uint8_t log_page, uint32_t nsid,
void *payload, uint32_t payload_size);
/* Abort the command identified by (cid, sqid). */
extern int nvme_admin_abort_cmd(struct nvme_ctrlr *ctrlr,
uint16_t cid, uint16_t sqid);
/* Create / delete one half (SQ or CQ) of an I/O queue pair. */
extern int nvme_admin_create_ioq(struct nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que,
enum nvme_io_queue_type io_qtype);
extern int nvme_admin_delete_ioq(struct nvme_ctrlr *ctrlr,
struct nvme_qpair *qpair,
enum nvme_io_queue_type io_qtype);
/* Identify Namespace into nsdata. */
extern int nvme_admin_identify_ns(struct nvme_ctrlr *ctrlr,
uint16_t nsid,
struct nvme_ns_data *nsdata);
/* Namespace attach/detach against a controller list. */
extern int nvme_admin_attach_ns(struct nvme_ctrlr *ctrlr,
uint32_t nsid,
struct nvme_ctrlr_list *clist);
extern int nvme_admin_detach_ns(struct nvme_ctrlr *ctrlr,
uint32_t nsid,
struct nvme_ctrlr_list *clist);
/* Namespace management: create (new ID returned in *nsid) and delete. */
extern int nvme_admin_create_ns(struct nvme_ctrlr *ctrlr,
struct nvme_ns_data *nsdata,
unsigned int *nsid);
extern int nvme_admin_delete_ns(struct nvme_ctrlr *ctrlr,
unsigned int nsid);
/* Firmware commit (activate) and image download (piecewise, at offset). */
extern int nvme_admin_fw_commit(struct nvme_ctrlr *ctrlr,
const struct nvme_fw_commit *fw_commit);
extern int nvme_admin_fw_image_dl(struct nvme_ctrlr *ctrlr,
void *fw, uint32_t size, uint32_t offset);
/*
 * Completion callback for synchronous polling: arg is a
 * struct nvme_completion_poll_status to fill and mark done.
 */
extern void nvme_request_completion_poll_cb(void *arg,
const struct nvme_cpl *cpl);
/* --- Controller lifecycle --- */
extern struct nvme_ctrlr *nvme_ctrlr_attach(struct pci_device *pci_dev,
struct nvme_ctrlr_opts *opts);
extern void nvme_ctrlr_detach(struct nvme_ctrlr *ctrlr);
/* --- Queue pair lifecycle and submission --- */
extern int nvme_qpair_construct(struct nvme_ctrlr *ctrlr,
struct nvme_qpair *qpair, enum nvme_qprio qprio,
uint16_t entries, uint16_t trackers);
extern void nvme_qpair_destroy(struct nvme_qpair *qpair);
extern void nvme_qpair_enable(struct nvme_qpair *qpair);
extern void nvme_qpair_disable(struct nvme_qpair *qpair);
extern int nvme_qpair_submit_request(struct nvme_qpair *qpair,
struct nvme_request *req);
extern void nvme_qpair_reset(struct nvme_qpair *qpair);
/* Fail all outstanding/queued requests on the qpair. */
extern void nvme_qpair_fail(struct nvme_qpair *qpair);
/* --- Request pool and request allocation --- */
extern int nvme_request_pool_construct(struct nvme_qpair *qpair);
extern void nvme_request_pool_destroy(struct nvme_qpair *qpair);
extern struct nvme_request *nvme_request_allocate(struct nvme_qpair *qpair,
const struct nvme_payload *payload, uint32_t payload_size,
nvme_cmd_cb cb_fn, void *cb_arg);
/* Allocate a request carrying no data payload. */
extern struct nvme_request *nvme_request_allocate_null(struct nvme_qpair *qpair,
nvme_cmd_cb cb_fn,
void *cb_arg);
/* Allocate a request over one contiguous buffer. */
extern struct nvme_request *
nvme_request_allocate_contig(struct nvme_qpair *qpair,
void *buffer, uint32_t payload_size,
nvme_cmd_cb cb_fn, void *cb_arg);
/* Free a request; the _locked variant presumably expects qpair->lock held — confirm. */
extern void nvme_request_free(struct nvme_request *req);
extern void nvme_request_free_locked(struct nvme_request *req);
/* Link/unlink a child of a split request. */
extern void nvme_request_add_child(struct nvme_request *parent,
struct nvme_request *child);
extern void nvme_request_remove_child(struct nvme_request *parent,
struct nvme_request *child);
/* Look up NVME_*_QUIRK_* bits for a PCI device. */
extern unsigned int nvme_ctrlr_get_quirks(struct pci_device *pdev);
/* Initialize a namespace structure from Identify data. */
extern int nvme_ns_construct(struct nvme_ctrlr *ctrlr,
struct nvme_ns *ns, unsigned int id);
/*
 * Typed accessors for fields of the mapped register block:
 * nvme_reg_mmio_{read,write}_{4,8}(ctrlr, register_name[, value]).
 * The casts drop the volatile qualifier from (sc)->regs; the underlying
 * nvme_mmio_* helpers are presumably responsible for performing a
 * proper volatile/ordered access — confirm in nvme_mem.h/nvme_pci.h.
 */
#define nvme_reg_mmio_read_4(sc, reg) \
nvme_mmio_read_4((__u32 *)&(sc)->regs->reg)
#define nvme_reg_mmio_read_8(sc, reg) \
nvme_mmio_read_8((__u64 *)&(sc)->regs->reg)
#define nvme_reg_mmio_write_4(sc, reg, val) \
nvme_mmio_write_4((__u32 *)&(sc)->regs->reg, val)
#define nvme_reg_mmio_write_8(sc, reg, val) \
nvme_mmio_write_8((__u64 *)&(sc)->regs->reg, val)
#endif