#include <oce_impl.h>
/*
 * Default DMA attributes for driver-allocated buffers.  Initializers are
 * positional and follow ddi_dma_attr(9S) field order.  sgllen is 1, so
 * every bind produces exactly one cookie.
 */
static ddi_dma_attr_t oce_dma_buf_attr = {
DMA_ATTR_V0,			/* dma_attr_version */
0x0000000000000000ull,		/* dma_attr_addr_lo: no low address limit */
0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi: full 64-bit addressing */
0x00000000FFFFFFFFull,		/* dma_attr_count_max */
OCE_DMA_ALIGNMENT,		/* dma_attr_align */
0x00000FFF,			/* dma_attr_burstsizes */
0x00000001,			/* dma_attr_minxfer */
0x00000000FFFFFFFFull,		/* dma_attr_maxxfer */
0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg */
1,				/* dma_attr_sgllen: single cookie per bind */
0x00000001,			/* dma_attr_granular */
0				/* dma_attr_flags: DDI_DMA_FLAGERR may be
				 * OR-ed in later by oce_set_dma_fma_flags() */
};
/*
 * Default device access attributes for DMA buffer mappings.  The fourth
 * field (devacc_attr_access) is implicitly zero here; it is raised to
 * DDI_DEFAULT_ACC by oce_set_dma_fma_flags() when FMA is enabled.
 */
static ddi_device_acc_attr_t oce_dma_buf_accattr = {
DDI_DEVICE_ATTR_V0,		/* devacc_attr_version */
DDI_NEVERSWAP_ACC,		/* devacc_attr_endian_flags: no byte swap */
DDI_STRICTORDER_ACC,		/* devacc_attr_dataorder: strict ordering */
};
/*
 * Allocate a DMA-able buffer of at least `size` bytes and bind it to the
 * device so it is reachable at a single device-visible address.
 *
 * dev      - per-instance device state; dev->dip anchors the DMA handle
 * size     - requested usable length in bytes (must be non-zero)
 * dma_attr - DMA attributes to use; NULL selects the driver defaults
 * flags    - DDI_DMA_* flags for memory allocation and binding
 *
 * Returns the new, zero-filled buffer descriptor on success, or NULL on
 * any failure.  The caller releases it with oce_free_dma_buffer().
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags)
{
	oce_dma_buf_t *dbuf;
	ddi_dma_cookie_t dma_cookie;
	uint32_t ccount;
	size_t real_len;
	int status;

	ASSERT(size > 0);

	if (dma_attr == NULL)
		dma_attr = &oce_dma_buf_attr;

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL)
		return (NULL);

	/* Step 1: DMA handle for this mapping. */
	status = ddi_dma_alloc_handle(dev->dip, dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (status != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto cleanup_dbuf;
	}

	/* Step 2: backing memory; real_len may exceed the request. */
	status = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &real_len, &dbuf->acc_handle);
	if (status != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto cleanup_handle;
	}

	/* Step 3: bind; sgllen = 1 in the default attrs forces one cookie. */
	status = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, real_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &dma_cookie, &ccount);
	if (status != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto cleanup_mem;
	}

	bzero(dbuf->base, real_len);
	dbuf->addr = dma_cookie.dmac_laddress;
	dbuf->size = real_len;		/* actual backing size */
	dbuf->len = size;		/* usable length the caller asked for */
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);

cleanup_mem:
	ddi_dma_mem_free(&dbuf->acc_handle);
cleanup_handle:
	ddi_dma_free_handle(&dbuf->dma_handle);
cleanup_dbuf:
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
	return (NULL);
}
/*
 * Release a buffer produced by oce_alloc_dma_buffer().  Safe to call
 * with NULL.  Teardown runs in the reverse of allocation order: unbind,
 * free the backing memory, free the handle, then the descriptor itself.
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL)
		return;

	/* The handle must be unbound before its memory can be freed. */
	if (dbuf->dma_handle != NULL)
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);

	if (dbuf->acc_handle != NULL)
		ddi_dma_mem_free(&dbuf->acc_handle);

	if (dbuf->dma_handle != NULL)
		ddi_dma_free_handle(&dbuf->dma_handle);

	kmem_free(dbuf, sizeof (oce_dma_buf_t));
}
/*
 * Allocate a descriptor ring backed by a single DMA buffer of
 * num_items * item_size bytes.
 *
 * dev       - per-instance device state
 * num_items - number of ring entries (must be non-zero)
 * item_size - size of one entry in bytes (must be non-zero)
 * flags     - DDI_DMA_* flags forwarded to oce_alloc_dma_buffer()
 *
 * Returns the new ring, or NULL on failure.  Destroy with
 * destroy_ring_buffer().
 */
oce_ring_buffer_t *
create_ring_buffer(struct oce_dev *dev,
    uint32_t num_items, uint32_t item_size, uint32_t flags)
{
	oce_ring_buffer_t *ring;
	uint32_t size;

	/*
	 * Reject zero-sized rings and rings whose total byte count would
	 * overflow uint32_t; a silent wraparound here would allocate an
	 * undersized DMA buffer that callers then index past.
	 */
	if (num_items == 0 || item_size == 0 ||
	    num_items > UINT32_MAX / item_size) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Ring buffer allocation failed");
		return (NULL);
	}

	ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_NOSLEEP);
	if (ring == NULL) {
		return (NULL);
	}

	size = num_items * item_size;
	ring->dbuf = oce_alloc_dma_buffer(dev, size, NULL, flags);
	if (ring->dbuf == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Ring buffer allocation failed");
		goto dbuf_fail;
	}

	ring->num_items = num_items;
	ring->item_size = item_size;
	ring->num_used = 0;	/* ring starts empty */
	return (ring);

dbuf_fail:
	kmem_free(ring, sizeof (oce_ring_buffer_t));
	return (NULL);
}
/*
 * Free a ring created by create_ring_buffer(): first the backing DMA
 * buffer, then the ring descriptor itself.  Both arguments must be
 * non-NULL.
 */
void
destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring)
{
	ASSERT(dev != NULL);
	ASSERT(ring != NULL);

	oce_free_dma_buffer(dev, ring->dbuf);
	/* Clear the stale pointer before releasing the descriptor. */
	ring->dbuf = NULL;

	kmem_free(ring, sizeof (oce_ring_buffer_t));
}
/*
 * Adjust the file-scope default DMA/access attributes to match the
 * driver's FMA capabilities.  No-op when the driver reports
 * DDI_FM_NOT_CAPABLE; otherwise enables default access checking and
 * toggles DDI_DMA_FLAGERR according to the DMA error capability bit.
 */
void
oce_set_dma_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE)
		return;

	oce_dma_buf_accattr.devacc_attr_access = DDI_DEFAULT_ACC;

	if (DDI_FM_DMA_ERR_CAP(fm_caps))
		oce_dma_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	else
		oce_dma_buf_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
}