#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
#include <rdma/rw.h>
/*
 * Connection state machine of an smbdirect socket.
 *
 * The connect sequence walks through address resolution, route
 * resolution, the RDMA connect and the SMB-Direct negotiate exchange;
 * each step has NEEDED (queued), RUNNING (in progress) and FAILED
 * variants before reaching CONNECTED.
 *
 * SMBDIRECT_SOCKET_CREATED must stay the first (zero) value:
 * smbdirect_socket_init() memset()s the socket and relies on the
 * zeroed ->status meaning CREATED (see its BUILD_BUG_ON()).
 */
enum smbdirect_socket_status {
	SMBDIRECT_SOCKET_CREATED,
	SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED,
	SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING,
	SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED,
	SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED,
	SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING,
	SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED,
	SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED,
	SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING,
	SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED,
	SMBDIRECT_SOCKET_NEGOTIATE_NEEDED,
	SMBDIRECT_SOCKET_NEGOTIATE_RUNNING,
	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
	SMBDIRECT_SOCKET_CONNECTED,
	SMBDIRECT_SOCKET_ERROR,
	SMBDIRECT_SOCKET_DISCONNECTING,
	SMBDIRECT_SOCKET_DISCONNECTED,
	SMBDIRECT_SOCKET_DESTROYED
};
static __always_inline
const char *smbdirect_socket_status_string(enum smbdirect_socket_status status)
{
switch (status) {
case SMBDIRECT_SOCKET_CREATED:
return "CREATED";
case SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED:
return "RESOLVE_ADDR_NEEDED";
case SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING:
return "RESOLVE_ADDR_RUNNING";
case SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED:
return "RESOLVE_ADDR_FAILED";
case SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED:
return "RESOLVE_ROUTE_NEEDED";
case SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING:
return "RESOLVE_ROUTE_RUNNING";
case SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED:
return "RESOLVE_ROUTE_FAILED";
case SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED:
return "RDMA_CONNECT_NEEDED";
case SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING:
return "RDMA_CONNECT_RUNNING";
case SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED:
return "RDMA_CONNECT_FAILED";
case SMBDIRECT_SOCKET_NEGOTIATE_NEEDED:
return "NEGOTIATE_NEEDED";
case SMBDIRECT_SOCKET_NEGOTIATE_RUNNING:
return "NEGOTIATE_RUNNING";
case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:
return "NEGOTIATE_FAILED";
case SMBDIRECT_SOCKET_CONNECTED:
return "CONNECTED";
case SMBDIRECT_SOCKET_ERROR:
return "ERROR";
case SMBDIRECT_SOCKET_DISCONNECTING:
return "DISCONNECTING";
case SMBDIRECT_SOCKET_DISCONNECTED:
return "DISCONNECTED";
case SMBDIRECT_SOCKET_DESTROYED:
return "DESTROYED";
}
return "<unknown>";
}
/*
 * Debug helper: encode a 0-or-negative error value as a pointer so it
 * can be fed to the "%pe" printk format.  A value of 0 ("no error")
 * maps to NULL, everything else goes through ERR_PTR().
 */
static __always_inline
const void * __must_check SMBDIRECT_DEBUG_ERR_PTR(long error)
{
	return error ? ERR_PTR(error) : NULL;
}
/*
 * Keepalive progress tracked in smbdirect_socket.idle.keepalive.
 * NOTE(review): exact transitions are driven by the users of this
 * header; names suggest NONE -> PENDING (request queued) -> SENT
 * (request on the wire) — confirm against the idle timer code.
 */
enum smbdirect_keepalive_status {
	SMBDIRECT_KEEPALIVE_NONE,
	SMBDIRECT_KEEPALIVE_PENDING,
	SMBDIRECT_KEEPALIVE_SENT
};
/*
 * Central per-connection state for an SMB-Direct socket, designed to
 * be shared between SMB-Direct implementations.
 */
struct smbdirect_socket {
	/* Current state machine position, see enum smbdirect_socket_status */
	enum smbdirect_socket_status status;
	/* Woken on status changes */
	wait_queue_head_t status_wait;
	/* First fatal error recorded for this connection (0 = none) */
	int first_error;

	/* Workqueue the work items below run on */
	struct workqueue_struct *workqueue;

	/* Tears down the connection; handler installed by the socket user */
	struct work_struct disconnect_work;

	/* RDMA connection manager state */
	struct {
		struct rdma_cm_id *cm_id;
		/*
		 * NOTE(review): presumably indicates a peer needing
		 * pre-standard iWarp behavior — confirm with users.
		 */
		bool legacy_iwarp;
	} rdma;

	/* IB verbs objects backing the connection */
	struct {
		struct ib_pd *pd;
		struct ib_cq *send_cq;
		struct ib_cq *recv_cq;
		struct ib_qp *qp;
		struct ib_device *dev;
	} ib;

	/* Negotiated/configured SMB-Direct parameters */
	struct smbdirect_socket_parameters parameters;

	/* Connect/negotiate sequencing */
	struct {
		spinlock_t lock;
		struct work_struct work;
	} connect;

	/* Idle detection and keepalive */
	struct {
		enum smbdirect_keepalive_status keepalive;
		struct work_struct immediate_work;
		struct delayed_work timer_work;
	} idle;

	/* Send path state */
	struct {
		/* Allocator for struct smbdirect_send_io */
		struct {
			struct kmem_cache *cache;
			mempool_t *pool;
		} mem;

		/*
		 * Independent credit counters; waiters sleep on the
		 * matching wait_queue until a credit is available.
		 * NOTE(review): the distinction between bcredits,
		 * lcredits and credits is defined by the users of this
		 * header — confirm there.
		 */
		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} bcredits;

		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} lcredits;

		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} credits;

		/* In-flight sends; separate waits for "decreased" and "drained" */
		struct {
			atomic_t count;
			wait_queue_head_t dec_wait_queue;
			wait_queue_head_t zero_wait_queue;
		} pending;
	} send_io;

	/* Receive path state */
	struct {
		/* What the next incoming message must be */
		enum {
			SMBDIRECT_EXPECT_NEGOTIATE_REQ = 1,
			SMBDIRECT_EXPECT_NEGOTIATE_REP = 2,
			SMBDIRECT_EXPECT_DATA_TRANSFER = 3,
		} expected;

		/* Allocator for struct smbdirect_recv_io */
		struct {
			struct kmem_cache *cache;
			mempool_t *pool;
		} mem;

		/* Reusable receive buffers, protected by free.lock */
		struct {
			struct list_head list;
			spinlock_t lock;
		} free;

		/* Receives currently posted to the QP; refilled via work */
		struct {
			atomic_t count;
			struct work_struct refill_work;
		} posted;

		/* Receive credits granted to the peer */
		struct {
			u16 target;
			atomic_t available;
			atomic_t count;
		} credits;

		/*
		 * Queue of received fragments waiting to be reassembled
		 * into full packets; protected by reassembly.lock.
		 */
		struct {
			struct list_head list;
			spinlock_t lock;
			wait_queue_head_t wait_queue;
			int data_length;
			int queue_length;
			int first_entry_offset;
			bool full_packet_received;
		} reassembly;
	} recv_io;

	/* Memory registration (struct smbdirect_mr_io) bookkeeping */
	struct {
		enum ib_mr_type type;

		/* All registrations, protected by all.lock */
		struct {
			struct list_head list;
			spinlock_t lock;
		} all;

		/* Registrations ready for use */
		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} ready;

		/* Registrations currently in use */
		struct {
			atomic_t count;
		} used;

		struct work_struct recovery_work;

		struct {
			wait_queue_head_t wait_queue;
		} cleanup;
	} mr_io;

	/* RDMA read/write (rdma_rw) resource limits */
	struct {
		struct {
			size_t max;
			size_t num_pages;
			atomic_t count;
			wait_queue_head_t wait_queue;
		} credits;
	} rw_io;

	/* Debug/statistics counters */
	struct {
		u64 get_receive_buffer;
		u64 put_receive_buffer;
		u64 enqueue_reassembly_queue;
		u64 dequeue_reassembly_queue;
		u64 send_empty;
	} statistics;
};
static void __smbdirect_socket_disabled_work(struct work_struct *work)
{
WARN_ON_ONCE(1);
}
/*
 * Bring a struct smbdirect_socket into a defined initial state.
 *
 * The structure is zeroed first; the BUILD_BUG_ON() asserts that
 * SMBDIRECT_SOCKET_CREATED is 0 so the zeroed ->status already means
 * CREATED.  Every work item is initialized with the WARN-only handler
 * __smbdirect_socket_disabled_work() and disabled right away, keeping
 * it inert until the user of the socket installs a real handler and
 * enables it.
 *
 * The atomic_set(..., 0) calls after the memset() are technically
 * redundant; they are kept for explicitness.
 */
static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
{
	/* make sure the zeroing below leaves status == CREATED */
	BUILD_BUG_ON(SMBDIRECT_SOCKET_CREATED != 0);

	memset(sc, 0, sizeof(*sc));
	init_waitqueue_head(&sc->status_wait);

	/* all work items start disabled with a WARN-only handler */
	INIT_WORK(&sc->disconnect_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->disconnect_work);

	spin_lock_init(&sc->connect.lock);
	INIT_WORK(&sc->connect.work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->connect.work);

	INIT_WORK(&sc->idle.immediate_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->idle.immediate_work);
	INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work);
	disable_delayed_work_sync(&sc->idle.timer_work);

	/* send side: credit counters and pending-send tracking */
	atomic_set(&sc->send_io.bcredits.count, 0);
	init_waitqueue_head(&sc->send_io.bcredits.wait_queue);

	atomic_set(&sc->send_io.lcredits.count, 0);
	init_waitqueue_head(&sc->send_io.lcredits.wait_queue);

	atomic_set(&sc->send_io.credits.count, 0);
	init_waitqueue_head(&sc->send_io.credits.wait_queue);

	atomic_set(&sc->send_io.pending.count, 0);
	init_waitqueue_head(&sc->send_io.pending.dec_wait_queue);
	init_waitqueue_head(&sc->send_io.pending.zero_wait_queue);

	/* receive side: free list, posted counter, reassembly queue */
	INIT_LIST_HEAD(&sc->recv_io.free.list);
	spin_lock_init(&sc->recv_io.free.lock);

	atomic_set(&sc->recv_io.posted.count, 0);
	INIT_WORK(&sc->recv_io.posted.refill_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->recv_io.posted.refill_work);

	atomic_set(&sc->recv_io.credits.available, 0);
	atomic_set(&sc->recv_io.credits.count, 0);

	INIT_LIST_HEAD(&sc->recv_io.reassembly.list);
	spin_lock_init(&sc->recv_io.reassembly.lock);
	init_waitqueue_head(&sc->recv_io.reassembly.wait_queue);

	/* RDMA read/write credits */
	atomic_set(&sc->rw_io.credits.count, 0);
	init_waitqueue_head(&sc->rw_io.credits.wait_queue);

	/* memory registration bookkeeping */
	spin_lock_init(&sc->mr_io.all.lock);
	INIT_LIST_HEAD(&sc->mr_io.all.list);
	atomic_set(&sc->mr_io.ready.count, 0);
	init_waitqueue_head(&sc->mr_io.ready.wait_queue);
	atomic_set(&sc->mr_io.used.count, 0);
	INIT_WORK(&sc->mr_io.recovery_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->mr_io.recovery_work);
	init_waitqueue_head(&sc->mr_io.cleanup.wait_queue);
}
/*
 * Evaluates to true when the socket is not cleanly in
 * __expected_status.  If a fatal error was already recorded in
 * ->first_error, __error_cmd is executed; otherwise, if ->status
 * merely differs from __expected_status, __unexpected_cmd is
 * executed.  Both command arguments are statement lists pasted
 * verbatim into the respective branch (and may be empty).
 */
#define __SMBDIRECT_CHECK_STATUS_FAILED(__sc, __expected_status, __error_cmd, __unexpected_cmd) ({ \
	bool __failed = false; \
	if (unlikely((__sc)->first_error)) { \
		__failed = true; \
		__error_cmd \
	} else if (unlikely((__sc)->status != (__expected_status))) { \
		__failed = true; \
		__unexpected_cmd \
	} \
	__failed; \
})
/*
 * Like __SMBDIRECT_CHECK_STATUS_FAILED(), but a status mismatch
 * without a recorded first_error triggers a one-time WARN showing the
 * expected and actual states, first_error rendered via "%pe" (the
 * width of 1 avoids the default pointer padding) and, if a cm_id
 * exists, the local and remote addresses.  __unexpected_cmd runs
 * after the WARN.  A recorded first_error fails silently.
 */
#define __SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status, __unexpected_cmd) \
	__SMBDIRECT_CHECK_STATUS_FAILED(__sc, __expected_status, \
	, \
	{ \
		const struct sockaddr_storage *__src = NULL; \
		const struct sockaddr_storage *__dst = NULL; \
		if ((__sc)->rdma.cm_id) { \
			__src = &(__sc)->rdma.cm_id->route.addr.src_addr; \
			__dst = &(__sc)->rdma.cm_id->route.addr.dst_addr; \
		} \
		WARN_ONCE(1, \
			  "expected[%s] != %s first_error=%1pe local=%pISpsfc remote=%pISpsfc\n", \
			  smbdirect_socket_status_string(__expected_status), \
			  smbdirect_socket_status_string((__sc)->status), \
			  SMBDIRECT_DEBUG_ERR_PTR((__sc)->first_error), \
			  __src, __dst); \
		__unexpected_cmd \
	})
/* Warn (once) on unexpected status; evaluates to true on any mismatch */
#define SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status) \
	__SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status, )
/*
 * Warn (once) on unexpected status and additionally trigger a
 * disconnect.  The including translation unit must provide a
 * __SMBDIRECT_SOCKET_DISCONNECT() macro; it is not defined here.
 */
#define SMBDIRECT_CHECK_STATUS_DISCONNECT(__sc, __expected_status) \
	__SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status, \
		__SMBDIRECT_SOCKET_DISCONNECT(__sc);)
/*
 * One RDMA send operation; the SMB-Direct packet bytes live in the
 * trailing flexible array.
 */
struct smbdirect_send_io {
	/* Owning connection */
	struct smbdirect_socket *socket;
	/* Links the completion queue event back to this structure */
	struct ib_cqe cqe;

	/* Number of valid entries in sge[] */
#define SMBDIRECT_SEND_IO_MAX_SGE	6
	size_t num_sge;
	struct ib_sge sge[SMBDIRECT_SEND_IO_MAX_SGE];

	/* Links sends belonging to the same batch (see smbdirect_send_batch) */
	struct list_head sibling_list;
	struct ib_send_wr wr;

	/* SMB-Direct packet (header + payload) */
	u8 packet[];
};
/*
 * A group of smbdirect_send_io messages submitted together.
 */
struct smbdirect_send_batch {
	/* List of smbdirect_send_io messages in this batch */
	struct list_head msg_list;
	/* Number of work requests in the batch */
	size_t wr_cnt;

	/*
	 * NOTE(review): presumably requests remote-key invalidation of
	 * remote_key with the batch — confirm with users of this header.
	 */
	bool need_invalidate_rkey;
	u32 remote_key;

	/* NOTE(review): credit accounting; semantics defined by users */
	int credit;
};
/*
 * One RDMA receive buffer; incoming wire data lands in packet[].
 */
struct smbdirect_recv_io {
	/* Owning connection */
	struct smbdirect_socket *socket;
	/* Links the completion queue event back to this structure */
	struct ib_cqe cqe;

	/* A receive uses exactly one scatter/gather entry */
#define SMBDIRECT_RECV_IO_MAX_SGE	1
	struct ib_sge sge;

	/* Entry in recv_io.free.list or recv_io.reassembly.list */
	struct list_head list;

	/*
	 * NOTE(review): presumably marks the first segment of a
	 * multi-segment payload during reassembly — confirm.
	 */
	bool first_segment;

	/* Received SMB-Direct packet (header + payload) */
	u8 packet[];
};
/*
 * Lifecycle state of a struct smbdirect_mr_io memory registration
 * (stored in its ->state field).
 */
enum smbdirect_mr_state {
	SMBDIRECT_MR_READY,
	SMBDIRECT_MR_REGISTERED,
	SMBDIRECT_MR_INVALIDATED,
	SMBDIRECT_MR_ERROR,
	SMBDIRECT_MR_DISABLED
};
/*
 * A memory registration used to give the peer RDMA access to local
 * pages.
 */
struct smbdirect_mr_io {
	/* Owning connection */
	struct smbdirect_socket *socket;
	/* Links the completion queue event back to this structure */
	struct ib_cqe cqe;

	/* Refcount and serialization for this registration */
	struct kref kref;
	struct mutex mutex;

	/* NOTE(review): presumably entry in socket->mr_io.all.list — confirm */
	struct list_head list;

	enum smbdirect_mr_state state;
	struct ib_mr *mr;
	/* Pages covered by the registration */
	struct sg_table sgt;
	enum dma_data_direction dir;

	/* Register vs. invalidate work request; never needed at the same time */
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};

	/* True if the rkey must be invalidated when the peer is done */
	bool need_invalidate;
	/* Completed once the invalidation has finished */
	struct completion invalidate_done;
};
/*
 * State for one RDMA read/write operation driven via the rdma_rw API
 * (<rdma/rw.h>); sg_list[] holds the trailing scatter/gather entries.
 */
struct smbdirect_rw_io {
	/* Owning connection */
	struct smbdirect_socket *socket;
	/* Links the completion queue event back to this structure */
	struct ib_cqe cqe;
	struct list_head list;

	/* Result of the operation (0 on success) */
	int error;
	/* Signaled when the operation completes, if non-NULL */
	struct completion *completion;

	struct rdma_rw_ctx rdma_ctx;
	/* sgt references the flexible sg_list[] below */
	struct sg_table sgt;
	struct scatterlist sg_list[];
};
#endif