#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_brvar.h>
/*
 * Bytes available for WRITING in a ring of total data size (z), given
 * read index (r) and write index (w).  When r == w the ring is
 * considered empty, so a writer must never fill the ring completely
 * (see the "<= total" space checks in the write paths).
 */
#define VMBUS_BR_WAVAIL(r, w, z) \
(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
/* Advance ring index (idx) by (inc), wrapping at ring size (sz). */
#define VMBUS_BR_IDXINC(idx, inc, sz) (((idx) + (inc)) % (sz))
static int vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS);
static int vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS);
static void vmbus_br_setup(struct vmbus_br *, void *, int);
/*
 * sysctl handler: render a human-readable snapshot of a bufring's
 * state (interrupt count, indices, interrupt mask, pending send size,
 * feature bits, and the derived read/write space).
 */
static int
vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS)
{
	const struct vmbus_br *br = arg1;
	uint32_t ridx, widx, wspace, rspace;
	char state[256];

	/* Snapshot the indices once so the derived values are consistent. */
	ridx = br->vbr_rindex;
	widx = br->vbr_windex;
	wspace = VMBUS_BR_WAVAIL(ridx, widx, br->vbr_dsize);
	rspace = br->vbr_dsize - wspace;

	snprintf(state, sizeof(state),
	    "intrcnt:%ju rindex:%u windex:%u imask:%u psndsz:%u fvalue:%u "
	    "ravail:%u wavail:%u",
	    (uintmax_t)br->vbr_intrcnt, ridx, widx, br->vbr_imask,
	    br->vbr_psndsz, br->vbr_fvalue, rspace, wspace);

	return sysctl_handle_string(oidp, state, sizeof(state), req);
}
/*
 * sysctl handler: export a bufring's state as a fixed-order array of
 * uint32_t, using the BR_STATE_* slot indices below.
 */
static int
vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS)
{
#define BR_STATE_RIDX 0
#define BR_STATE_WIDX 1
#define BR_STATE_IMSK 2
#define BR_STATE_PSSZ 3
#define BR_STATE_FVAL 4
#define BR_STATE_RSPC 5
#define BR_STATE_WSPC 6
#define BR_STATE_MAX 7
	const struct vmbus_br *br = arg1;
	uint32_t ridx, widx, wspace;
	uint32_t state[BR_STATE_MAX];

	/* Consistent snapshot of both indices for the derived space values. */
	ridx = br->vbr_rindex;
	widx = br->vbr_windex;
	wspace = VMBUS_BR_WAVAIL(ridx, widx, br->vbr_dsize);

	state[BR_STATE_RIDX] = ridx;
	state[BR_STATE_WIDX] = widx;
	state[BR_STATE_IMSK] = br->vbr_imask;
	state[BR_STATE_PSSZ] = br->vbr_psndsz;
	state[BR_STATE_FVAL] = br->vbr_fvalue;
	state[BR_STATE_RSPC] = br->vbr_dsize - wspace;	/* readable bytes */
	state[BR_STATE_WSPC] = wspace;			/* writable bytes */

	return sysctl_handle_opaque(oidp, state, sizeof(state), req);
}
/*
 * Create the per-bufring sysctl subtree "name" under br_tree, with a
 * "state" node (string form) and a "state_bin" node (binary form).
 * Silently does nothing if the subtree node cannot be created.
 */
void
vmbus_br_sysctl_create(struct sysctl_ctx_list *ctx, struct sysctl_oid *br_tree,
    struct vmbus_br *br, const char *name)
{
	struct sysctl_oid *node;
	char descr[64];

	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(br_tree), OID_AUTO,
	    name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (node == NULL)
		return;

	snprintf(descr, sizeof(descr), "%s state", name);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "state",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    br, 0, vmbus_br_sysctl_state, "A", descr);

	snprintf(descr, sizeof(descr), "%s binary state", name);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "state_bin",
	    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    br, 0, vmbus_br_sysctl_state_bin, "IU", descr);
}
/*
 * Set the rx bufring's interrupt mask (presumably checked by the host
 * before signalling the guest — confirm against the VMBus protocol).
 * The full barrier orders the mask store before any later ring access.
 */
void
vmbus_rxbr_intr_mask(struct vmbus_rxbr *rbr)
{
	rbr->rxbr_imask = 1;
	mb();
}
/*
 * Bytes currently available for READING from the rx bufring: the ring
 * size minus the current write space.  Both indices are loaded with
 * acquire semantics so data stored before an index update is visible.
 */
static __inline uint32_t
vmbus_rxbr_avail(const struct vmbus_rxbr *rbr)
{
	uint32_t rindex, windex;

	rindex = atomic_load_acq_32(&rbr->rxbr_rindex);
	windex = atomic_load_acq_32(&rbr->rxbr_windex);
	return (rbr->rxbr_dsize -
	    VMBUS_BR_WAVAIL(rindex, windex, rbr->rxbr_dsize));
}
/* Public wrapper: bytes available for reading from the rx bufring. */
uint32_t
vmbus_rxbr_available(const struct vmbus_rxbr *rbr)
{
	return (vmbus_rxbr_avail(rbr));
}
/*
 * Clear the rx bufring's interrupt mask and return the number of
 * readable bytes at that point.  The barrier orders the mask store
 * before the availability check; a non-zero return means data arrived
 * while masked (presumably the caller must drain it — confirm with
 * callers, since no interrupt will have been delivered for it).
 */
uint32_t
vmbus_rxbr_intr_unmask(struct vmbus_rxbr *rbr)
{
	rbr->rxbr_imask = 0;
	mb();
	return vmbus_rxbr_avail(rbr);
}
/*
 * Attach a bufring to its backing memory: "buf" is a
 * struct vmbus_bufring header followed by the data area; vbr_dsize
 * records the data-area size (header excluded).
 */
static void
vmbus_br_setup(struct vmbus_br *br, void *buf, int blen)
{
	br->vbr = buf;
	br->vbr_dsize = blen - sizeof(struct vmbus_bufring);
}
/* Initialize the rx bufring's spin lock. */
void
vmbus_rxbr_init(struct vmbus_rxbr *rbr)
{
	mtx_init(&rbr->rxbr_lock, "vmbus_rxbr", NULL, MTX_SPIN);
}
/* Destroy the rx bufring's spin lock; counterpart of vmbus_rxbr_init(). */
void
vmbus_rxbr_deinit(struct vmbus_rxbr *rbr)
{
	mtx_destroy(&rbr->rxbr_lock);
}
/* Attach the rx bufring to its backing memory (header + data area). */
void
vmbus_rxbr_setup(struct vmbus_rxbr *rbr, void *buf, int blen)
{
	vmbus_br_setup(&rbr->rxbr, buf, blen);
}
/*
 * Decide whether the host must be signalled after the guest consumed
 * "bytes_read" bytes from the rx bufring.  Signal only when the
 * pending-send-size feature is active and this read is the one that
 * pushed the writable space past the host's advertised threshold.
 * The mb() calls order the flag/psndsz loads against the index loads.
 */
static __inline boolean_t
vmbus_rxbr_need_signal(const struct vmbus_rxbr *rbr, uint32_t bytes_read)
{
	uint32_t pending_snd_sz, canwrite_size;

	/* No need to signal if the pending-send-size feature is off. */
	if (!rbr->rxbr_fpsndsz)
		return false;
	mb();
	pending_snd_sz = rbr->rxbr_psndsz;
	/* No need to signal if the advertised pending send size is 0. */
	if (!pending_snd_sz)
		return false;
	mb();
	canwrite_size = rbr->rxbr_dsize - vmbus_rxbr_avail(rbr);
	/* No need to signal if there was already enough space before this read. */
	if (canwrite_size - bytes_read > pending_snd_sz)
		return false;
	/* No need to signal if there still isn't enough space for the host. */
	if (canwrite_size <= pending_snd_sz)
		return false;
	return true;
}
/* Initialize the tx bufring's spin lock. */
void
vmbus_txbr_init(struct vmbus_txbr *tbr)
{
	mtx_init(&tbr->txbr_lock, "vmbus_txbr", NULL, MTX_SPIN);
}
/* Destroy the tx bufring's spin lock; counterpart of vmbus_txbr_init(). */
void
vmbus_txbr_deinit(struct vmbus_txbr *tbr)
{
	mtx_destroy(&tbr->txbr_lock);
}
/*
 * Attach the tx bufring to its backing memory and enable the
 * pending-send-size feature flag for this ring.
 */
void
vmbus_txbr_setup(struct vmbus_txbr *tbr, void *buf, int blen)
{
	vmbus_br_setup(&tbr->txbr, buf, blen);
	tbr->txbr_fpsndsz = 1;
}
/*
 * Read the tx bufring's interrupt mask.  The full barrier orders this
 * load against the caller's preceding ring accesses.
 */
uint32_t
vmbus_txbr_get_imask(const struct vmbus_txbr *tbr)
{
	mb();
	return(tbr->txbr_imask);
}
/* Advertise the guest's pending send size on the tx bufring. */
void
vmbus_txbr_set_pending_snd_sz(struct vmbus_txbr *tbr, uint32_t size)
{
	tbr->txbr_psndsz = size;
}
/*
 * Decide whether the host must be signalled after a write whose start
 * index was "old_windex".  Signal only when interrupts are unmasked
 * and the ring was empty before the write (old write index == current
 * read index); otherwise the host is already aware of pending data.
 * The barriers order the imask load before the rindex load.
 */
static __inline boolean_t
vmbus_txbr_need_signal(const struct vmbus_txbr *tbr, uint32_t old_windex)
{
	mb();
	if (tbr->txbr_imask)
		return (FALSE);
	__compiler_membar();
	/* Empty before this write (windex == rindex) -> host must be woken. */
	if (old_windex == atomic_load_acq_32(&tbr->txbr_rindex))
		return (TRUE);
	return (FALSE);
}
/*
 * Bytes currently available for WRITING to the tx bufring.  Both
 * indices are loaded with acquire semantics.
 */
static __inline uint32_t
vmbus_txbr_avail(const struct vmbus_txbr *tbr)
{
	uint32_t rindex, windex;

	rindex = atomic_load_acq_32(&tbr->txbr_rindex);
	windex = atomic_load_acq_32(&tbr->txbr_windex);
	return VMBUS_BR_WAVAIL(rindex, windex, tbr->txbr_dsize);
}
/*
 * Copy "cplen" bytes from "src0" into the tx ring at "windex",
 * wrapping past the end of the data area when necessary.  Returns the
 * write index just past the copied bytes.
 */
static __inline uint32_t
vmbus_txbr_copyto(const struct vmbus_txbr *tbr, uint32_t windex,
    const void *src0, uint32_t cplen)
{
	uint8_t *ring = tbr->txbr_data;
	uint32_t dsize = tbr->txbr_dsize;
	uint32_t tail = dsize - windex;	/* bytes left before the wrap point */
	const uint8_t *src = src0;

	if (cplen <= tail) {
		/* Fits contiguously. */
		memcpy(ring + windex, src, cplen);
	} else {
		/* Split copy: fill to the end, then wrap to the start. */
		memcpy(ring + windex, src, tail);
		memcpy(ring, src + tail, cplen - tail);
	}
	return VMBUS_BR_IDXINC(windex, cplen, dsize);
}
/*
 * Like vmbus_txbr_copyto(), but let callback "cb" fill the ring
 * region(s) directly instead of memcpy.  The callback's status is
 * stored in *ret; on a wrapped region the second call is skipped if
 * the first fails.  The returned index is always advanced by "cplen".
 */
static __inline uint32_t
vmbus_txbr_copyto_call(const struct vmbus_txbr *tbr, uint32_t windex,
    uint32_t cplen, vmbus_br_copy_callback_t cb, void *cbarg, int *ret)
{
	uint8_t *ring = tbr->txbr_data;
	uint32_t dsize = tbr->txbr_dsize;
	uint32_t tail = dsize - windex;	/* bytes left before the wrap point */
	int status;

	if (cplen <= tail) {
		/* Single contiguous region. */
		status = cb((void *)(ring + windex), cplen, cbarg);
	} else {
		/* Wrapped: hand out the tail piece, then the head piece. */
		status = cb((void *)(ring + windex), tail, cbarg);
		if (!status)
			status = cb((void *)ring, cplen - tail, cbarg);
	}
	*ret = status;
	return VMBUS_BR_IDXINC(windex, cplen, dsize);
}
/* Public wrapper: bytes available for writing to the tx bufring. */
uint32_t
vmbus_txbr_available(const struct vmbus_txbr *tbr)
{
	return (vmbus_txbr_avail(tbr));
}
/*
 * Write an iovec to the tx bufring, appending the 64-bit trailer that
 * records the pre-write index, then publish the new write index and
 * report via *need_sig (if non-NULL) whether the host must be
 * signalled.  iov entries with a NULL base are generated through "cb".
 *
 * Returns 0 on success, EAGAIN when the ring lacks space, or the
 * callback's non-zero status.
 *
 * NOTE(review): the space check and data copies run without
 * txbr_lock; only the trailer copy and index publication are locked.
 * This looks safe only with a single writer per tx ring — confirm
 * with callers.
 */
int
vmbus_txbr_write_call(struct vmbus_txbr *tbr,
    const struct iovec iov[], int iovlen,
    vmbus_br_copy_callback_t cb, void *cbarg,
    boolean_t *need_sig)
{
	uint32_t old_windex, windex, total;
	uint64_t save_windex;
	int i;
	int cb_ret = 0;

	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	total += sizeof(save_windex);
	/*
	 * "<=" not "<": filling the ring exactly would make
	 * windex == rindex, which means "empty".
	 */
	if (vmbus_txbr_avail(tbr) <= total) {
		return (EAGAIN);
	}
	old_windex = tbr->txbr_windex;
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		if (iov[i].iov_base != NULL) {
			windex = vmbus_txbr_copyto(tbr, windex,
			    iov[i].iov_base, iov[i].iov_len);
		} else if (cb != NULL) {
			/* NULL base: let the callback produce the data. */
			windex = vmbus_txbr_copyto_call(tbr, windex,
			    iov[i].iov_len, cb, cbarg, &cb_ret);
			/* Callback failed: abandon the write (index not published). */
			if (cb_ret)
				return (cb_ret);
		}
	}
	mtx_lock_spin(&tbr->txbr_lock);
	/* Trailer: the pre-write index in the upper 32 bits. */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
	    sizeof(save_windex));
	/* Publish the new write index only after all data is in place. */
	__compiler_membar();
	atomic_store_rel_32(&tbr->txbr_windex, windex);
	mtx_unlock_spin(&tbr->txbr_lock);
	if (need_sig)
		*need_sig = vmbus_txbr_need_signal(tbr, old_windex);
	return (0);
}
/*
 * Write an iovec to the tx bufring entirely under txbr_lock, append
 * the 64-bit trailer recording the pre-write index, publish the new
 * write index, and report via *need_sig whether the host must be
 * signalled.  Returns 0 on success or EAGAIN when the ring lacks
 * space (see the "<=" note: filling exactly would read as empty).
 */
int
vmbus_txbr_write(struct vmbus_txbr *tbr, const struct iovec iov[], int iovlen,
    boolean_t *need_sig)
{
	uint32_t old_windex, windex, total;
	uint64_t save_windex;
	int i;

	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	/* Account for the trailing 64-bit saved write index. */
	total += sizeof(save_windex);
	mtx_lock_spin(&tbr->txbr_lock);
	if (vmbus_txbr_avail(tbr) <= total) {
		mtx_unlock_spin(&tbr->txbr_lock);
		return (EAGAIN);
	}
	old_windex = atomic_load_acq_32(&tbr->txbr_windex);
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		windex = vmbus_txbr_copyto(tbr, windex,
		    iov[i].iov_base, iov[i].iov_len);
	}
	/* Trailer: the pre-write index in the upper 32 bits. */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
	    sizeof(save_windex));
	/* Publish the new write index only after all data is in place. */
	__compiler_membar();
	atomic_store_rel_32(&tbr->txbr_windex, windex);
	mtx_unlock_spin(&tbr->txbr_lock);
	*need_sig = vmbus_txbr_need_signal(tbr, old_windex);
	return (0);
}
/*
 * Copy "cplen" bytes out of the rx ring starting at "rindex" into
 * "dst0", handling wrap-around.  Returns the read index just past the
 * copied bytes.
 */
static __inline uint32_t
vmbus_rxbr_copyfrom(const struct vmbus_rxbr *rbr, uint32_t rindex,
    void *dst0, int cplen)
{
	const uint8_t *ring = rbr->rxbr_data;
	uint32_t dsize = rbr->rxbr_dsize;
	uint32_t tail = dsize - rindex;	/* bytes left before the wrap point */
	uint8_t *dst = dst0;

	if ((uint32_t)cplen <= tail) {
		/* Fits contiguously. */
		memcpy(dst, ring + rindex, cplen);
	} else {
		/* Split copy: drain to the end, then wrap to the start. */
		memcpy(dst, ring + rindex, tail);
		memcpy(dst + tail, ring, cplen - tail);
	}
	return VMBUS_BR_IDXINC(rindex, cplen, dsize);
}
/*
 * Hand the rx-ring region(s) holding "cplen" bytes at "rindex" to
 * callback "cb", splitting at the wrap point when needed (the second
 * call is skipped if the first fails).
 *
 * Returns 0 on success or the first non-zero status from "cb".
 * Fix: unlike the copyto/copyfrom siblings, this function returns a
 * callback error status, not an updated ring index — the previous
 * uint32_t return type was misleading (its only caller stores the
 * result in an int), so it is now declared int.
 */
static __inline int
vmbus_rxbr_copyfrom_call(const struct vmbus_rxbr *rbr, uint32_t rindex,
    int cplen, vmbus_br_copy_callback_t cb, void *cbarg)
{
	uint8_t *br_data = rbr->rxbr_data;
	uint32_t br_dsize = rbr->rxbr_dsize;
	int error = 0;

	if (cplen > br_dsize - rindex) {
		/* Wrap-around: two segments. */
		uint32_t fraglen = br_dsize - rindex;

		error = cb((void *)(br_data + rindex), fraglen, cbarg);
		if (!error)
			error = cb((void *)br_data, cplen - fraglen, cbarg);
	} else {
		error = cb((void *)(br_data + rindex), cplen, cbarg);
	}
	return (error);
}
/*
 * Copy the first "dlen" bytes out of the rx bufring without advancing
 * the read index.  Returns EAGAIN unless the ring holds at least dlen
 * bytes plus the 64-bit trailer.
 */
int
vmbus_rxbr_peek(struct vmbus_rxbr *rbr, void *data, int dlen)
{
	mtx_lock_spin(&rbr->rxbr_lock);
	/* The requested data plus the trailing uint64_t must be present. */
	if (vmbus_rxbr_avail(rbr) < dlen + sizeof(uint64_t)) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}
	vmbus_rxbr_copyfrom(rbr,
	    atomic_load_acq_32(&rbr->rxbr_rindex), data, dlen);
	mtx_unlock_spin(&rbr->rxbr_lock);
	return (0);
}
/*
 * Like vmbus_rxbr_peek(), but skip "skip" bytes first and feed the
 * data to callback "cb" instead of copying it out.  The read index is
 * not advanced.  Returns EAGAIN when the ring holds less than
 * skip + dlen + trailer bytes, otherwise the callback's status.
 *
 * NOTE(review): rxbr_lock is dropped before the callback runs, so a
 * concurrent consumer advancing rindex could invalidate the peeked
 * region — presumably callers serialize reads on this ring; confirm.
 */
int
vmbus_rxbr_peek_call(struct vmbus_rxbr *rbr, int dlen, uint32_t skip,
    vmbus_br_copy_callback_t cb, void *cbarg)
{
	uint32_t rindex, br_dsize0 = rbr->rxbr_dsize;
	int ret;

	mtx_lock_spin(&rbr->rxbr_lock);
	/* Skipped bytes, requested data, and the trailer must be present. */
	if (vmbus_rxbr_avail(rbr) < skip + dlen + sizeof(uint64_t)) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}
	rindex = VMBUS_BR_IDXINC(rbr->rxbr_rindex, skip, br_dsize0);
	mtx_unlock_spin(&rbr->rxbr_lock);
	ret = vmbus_rxbr_copyfrom_call(rbr, rindex, dlen, cb, cbarg);
	return (ret);
}
/*
 * Optionally consume one packet (idx_adv data bytes plus the 64-bit
 * trailer) by advancing the read index, then peek the next "dlen"
 * bytes into "data" without consuming them.  With idx_adv == 0 this
 * degenerates to a plain peek.  Returns EAGAIN when the ring holds
 * less than idx_adv + trailer + dlen bytes.  *need_sig (if non-NULL)
 * reports whether the host must be signalled for the released space;
 * it is false when nothing was consumed.
 */
int
vmbus_rxbr_idxadv_peek(struct vmbus_rxbr *rbr, void *data, int dlen,
    uint32_t idx_adv, boolean_t *need_sig)
{
	uint32_t rindex, br_dsize = rbr->rxbr_dsize;

	mtx_lock_spin(&rbr->rxbr_lock);
	if (vmbus_rxbr_avail(rbr) < idx_adv + sizeof(uint64_t) + dlen) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}
	if (idx_adv > 0) {
		/* Consume the previous packet: data + 64-bit trailer. */
		rindex = VMBUS_BR_IDXINC(rbr->rxbr_rindex,
		    idx_adv + sizeof(uint64_t), br_dsize);
		/* Publish the new read index before peeking past it. */
		__compiler_membar();
		atomic_store_rel_32(&rbr->rxbr_rindex, rindex);
	}
	vmbus_rxbr_copyfrom(rbr,
	    atomic_load_acq_32(&rbr->rxbr_rindex), data, dlen);
	mtx_unlock_spin(&rbr->rxbr_lock);
	if (need_sig) {
		if (idx_adv > 0)
			*need_sig =
			    vmbus_rxbr_need_signal(rbr, idx_adv +
			    sizeof(uint64_t));
		else
			*need_sig = false;
	}
	return (0);
}
/*
 * Consume one packet by advancing the read index past idx_adv data
 * bytes plus the 64-bit trailer, without copying anything out.
 * Returns EAGAIN when the ring holds less than that.  *need_sig (if
 * non-NULL) reports whether the host must be signalled for the
 * released space.
 */
int
vmbus_rxbr_idxadv(struct vmbus_rxbr *rbr, uint32_t idx_adv,
    boolean_t *need_sig)
{
	uint32_t rindex, br_dsize = rbr->rxbr_dsize;

	mtx_lock_spin(&rbr->rxbr_lock);
	if (vmbus_rxbr_avail(rbr) < idx_adv + sizeof(uint64_t)) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}
	rindex = VMBUS_BR_IDXINC(rbr->rxbr_rindex,
	    idx_adv + sizeof(uint64_t), br_dsize);
	/* Publish the new read index with release semantics. */
	__compiler_membar();
	atomic_store_rel_32(&rbr->rxbr_rindex, rindex);
	mtx_unlock_spin(&rbr->rxbr_lock);
	if (need_sig) {
		*need_sig =
		    vmbus_rxbr_need_signal(rbr, idx_adv + sizeof(uint64_t));
	}
	return (0);
}
/*
 * Read "dlen" bytes (after skipping "skip" bytes of header) from the
 * rx bufring and consume the whole packet, including the 64-bit
 * trailer, by advancing the read index.  Returns EAGAIN when the ring
 * holds less than skip + dlen + trailer bytes.  *need_sig (if
 * non-NULL) reports whether the host must be signalled for the
 * released space.
 */
int
vmbus_rxbr_read(struct vmbus_rxbr *rbr, void *data, int dlen, uint32_t skip,
    boolean_t *need_sig)
{
	uint32_t rindex, br_dsize = rbr->rxbr_dsize;

	KASSERT(dlen + skip > 0, ("invalid dlen %d, offset %u", dlen, skip));
	mtx_lock_spin(&rbr->rxbr_lock);
	if (vmbus_rxbr_avail(rbr) < dlen + skip + sizeof(uint64_t)) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}
	/* Skip the header bytes, copy the payload, then skip the trailer. */
	rindex = VMBUS_BR_IDXINC(atomic_load_acq_32(&rbr->rxbr_rindex),
	    skip, br_dsize);
	rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);
	rindex = VMBUS_BR_IDXINC(rindex, sizeof(uint64_t), br_dsize);
	/* Publish the new read index with release semantics. */
	__compiler_membar();
	atomic_store_rel_32(&rbr->rxbr_rindex, rindex);
	mtx_unlock_spin(&rbr->rxbr_lock);
	if (need_sig) {
		*need_sig =
		    vmbus_rxbr_need_signal(rbr,
		    dlen + skip + sizeof(uint64_t));
	}
	return (0);
}