dma_xfer
EXPORT_SYMBOL(dma_xfer);
extern int dma_xfer(unsigned int chan, unsigned long from,
                    unsigned long to, size_t size, unsigned int mode);
dma_xfer(chan, from, to, size, DMA_MODE_WRITE)
dma_xfer(chan, from, to, size, DMA_MODE_READ)
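The first hits are the SuperH DMA API: dma-api.c defines and exports dma_xfer(), and the two mode-specific calls are the expansions of the dma_write()/dma_read() convenience macros in <asm/dma.h>. A minimal usage sketch follows; the function name, channel, and addresses are placeholders of mine, and the direction comments reflect my reading of DMA_MODE_WRITE/DMA_MODE_READ:

#include <asm/dma.h>

/* Sketch: one outbound and one inbound transfer on a channel the
 * caller already owns (e.g. obtained via request_dma()).
 */
static int dma_xfer_roundtrip(unsigned int chan, unsigned long buf,
                              unsigned long dev_addr, size_t size)
{
        int ret;

        /* memory -> device */
        ret = dma_xfer(chan, buf, dev_addr, size, DMA_MODE_WRITE);
        if (ret)
                return ret;

        /* device -> memory */
        return dma_xfer(chan, dev_addr, buf, size, DMA_MODE_READ);
}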
struct dma_xfer *xfer;
struct dma_xfer *x;
struct ioctl_resources *resources, struct dma_xfer *xfer)
static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
struct dma_xfer *xfer;
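The struct dma_xfer hits above come from the QAIC accelerator driver (drivers/accel/qaic), where the structure tracks the pinned user pages and scatter-gather table behind one manage-message DMA transfer: find_and_map_user_pages() fills it, encode_addr_size_pairs() serializes it for the device, and cleanup_xfer() tears it down. A sketch of that lifecycle; the field layout and every call below are inferred from the function names, not quoted from the driver:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical layout; the listing only shows the type name. */
struct dma_xfer {
        struct sg_table *sgt;           /* mapped scatter-gather table */
        struct page **page_list;        /* pinned user pages */
        unsigned long nr_pages;
};

/* Sketch of the teardown a cleanup_xfer()-style helper performs:
 * unmap and free the sg_table, then unpin every user page.
 */
static void example_cleanup(struct device *dev, struct dma_xfer *xfer)
{
        unsigned long i;

        dma_unmap_sgtable(dev, xfer->sgt, DMA_TO_DEVICE, 0);
        sg_free_table(xfer->sgt);
        kfree(xfer->sgt);
        for (i = 0; i < xfer->nr_pages; i++)
                put_page(xfer->page_list[i]);
        kfree(xfer->page_list);
}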
ret = dma_xfer(acdev, src, dest, dma_len);
struct dma_xfer_hbm dma_xfer;
dma_xfer.hbm = DMA_XFER;
dma_xfer.fw_client_id = cl->fw_client_id;
dma_xfer.host_client_id = cl->host_client_id;
dma_xfer.reserved = 0;
dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
dma_xfer.msg_length = cl_msg->send_buf.size;
dma_xfer.reserved2 = 0;
ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
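This run is the host-to-firmware TX path in the Intel ISH transport (drivers/hid/intel-ish-hid/ishtp): the client copies its payload into the host DMA buffer at offset off, fills a DMA_XFER host bus message describing it, and posts it with ishtp_write_message(). For reference, the wire format those assignments populate looks like this; the field order follows the assignments above, while the exact type widths are my reading of the driver's hbm.h and should be verified there:

/* One DMA_XFER / DMA_XFER_ACK host bus message entry. */
struct dma_xfer_hbm {
        u8 hbm;                 /* DMA_XFER or DMA_XFER_ACK */
        u8 fw_client_id;
        u8 host_client_id;
        u8 reserved;
        u64 msg_addr;           /* location inside the shared DMA buffer */
        u32 msg_length;
        u32 reserved2;
} __packed;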
struct dma_xfer_hbm *dma_xfer)
offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
if (dma_xfer->msg_length >
ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);
if (cl->fw_client_id == dma_xfer->fw_client_id &&
cl->host_client_id == dma_xfer->host_client_id)
dma_xfer->msg_length) {
++dma_xfer;
struct dma_xfer_hbm *dma_xfer)
struct dma_xfer_hbm *prm = dma_xfer;
offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
if (dma_xfer->msg_length >
recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
dma_xfer->hbm = DMA_XFER_ACK; /* Prepare for response */
++dma_xfer;
struct dma_xfer_hbm *dma_xfer;
dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
ishtp_hbm_dma_xfer(dev, dma_xfer);
dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
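The last four ishtp hits are the HBM dispatcher routing incoming bus messages to the two handlers: ishtp_hbm_dma_xfer() consumes firmware writes into the host RX buffer (note how it flips each entry to DMA_XFER_ACK to prepare the response), while ishtp_hbm_dma_xfer_ack() releases TX buffer space the firmware has consumed. A condensed sketch of that routing; the switch shape and the hbm_cmd field name are my paraphrase of the dispatcher, with all other commands and error handling elided:

switch (ishtp_msg->hbm_cmd) {
case DMA_XFER:
        /* firmware wrote into the host RX DMA buffer */
        dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
        ishtp_hbm_dma_xfer(dev, dma_xfer);
        break;
case DMA_XFER_ACK:
        /* firmware consumed part of our TX DMA buffer */
        dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
        ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
        break;
}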
struct i3c_dma *dma_xfer __free(kfree) = kzalloc_obj(*dma_xfer);
if (!dma_xfer)
dma_xfer->dev = dev;
dma_xfer->buf = buf;
dma_xfer->dir = dir;
dma_xfer->len = len;
dma_xfer->map_len = len;
dma_xfer->map_len = ALIGN(len, cache_line_size());
bounce = kzalloc(dma_xfer->map_len, GFP_KERNEL);
bounce = kmemdup(buf, dma_xfer->map_len, GFP_KERNEL);
dma_xfer->addr = dma_map_single(dev, dma_buf, dma_xfer->map_len, dir);
if (dma_mapping_error(dev, dma_xfer->addr))
dma_xfer->bounce_buf = no_free_ptr(bounce);
return no_free_ptr(dma_xfer);
void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer)
dma_unmap_single(dma_xfer->dev, dma_xfer->addr,
dma_xfer->map_len, dma_xfer->dir);
if (dma_xfer->bounce_buf) {
if (dma_xfer->dir == DMA_FROM_DEVICE)
memcpy(dma_xfer->buf, dma_xfer->bounce_buf,
dma_xfer->len);
kfree(dma_xfer->bounce_buf);
kfree(dma_xfer);
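The i3c hits pair a map helper with i3c_master_dma_unmap_single(): the map side allocates a struct i3c_dma under __free(kfree) scope cleanup, substitutes a cacheline-aligned bounce buffer where the caller's buffer is not DMA-safe, maps it with dma_map_single(), and transfers ownership out through no_free_ptr(); the unmap side reverses the mapping, copies bounce data back for DMA_FROM_DEVICE, and frees the tracking object. A usage sketch; the map helper's name and parameter order are inferred from the fields it fills, and the NULL-on-failure convention is an assumption, so check <linux/i3c/master.h>:

struct i3c_dma *dma_xfer;

/* Assumed signature: (dev, buf, len, dir), returning NULL on
 * allocation or mapping failure.
 */
dma_xfer = i3c_master_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
if (!dma_xfer)
        return -ENOMEM;

/* program the controller with dma_xfer->addr / dma_xfer->map_len,
 * then wait for the transfer to complete
 */

i3c_master_dma_unmap_single(dma_xfer); /* copies any bounce data back */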
dma_xfer(host, buf, len, DMA_FROM_DEVICE);
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
if (task->ata_task.dma_xfer)
} else if (task->ata_task.dma_xfer) {
if (task->ata_task.dma_xfer) {
task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
if (t->ata_task.dma_xfer == 0 &&
} else if (task->ata_task.dma_xfer) {
if (t->ata_task.dma_xfer == 0 &&
} else if (task->ata_task.dma_xfer) {
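These scsi hits are libsas and its low-level drivers: sas_ata.c latches whether the queued ATA command uses a DMA protocol (ata_is_dma()), and each LLDD then branches on ata_task.dma_xfer when it builds its hardware task context (the bit itself is declared in libsas.h, further down this listing). A sketch of the consumer side; EXAMPLE_HW_DMA and EXAMPLE_HW_PIO are invented stand-ins for a driver's protocol flags:

if (task->ata_task.dma_xfer)
        ctx_flags |= EXAMPLE_HW_DMA;    /* UDMA/MWDMA data transfer */
else
        ctx_flags |= EXAMPLE_HW_PIO;    /* PIO data transfer */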
struct {
        u32                     pio[4];
        struct scatterlist      sg;
} *dma_xfer;
dma_xfer = kzalloc_objs(*dma_xfer, sgs);
if (!dma_xfer)
dma_xfer[sg_count].pio[0] = ctrl0;
dma_xfer[sg_count].pio[3] = min;
sg_init_table(&dma_xfer[sg_count].sg, 1);
sg_set_page(&dma_xfer[sg_count].sg, vm_page,
sg_init_one(&dma_xfer[sg_count].sg, buf, min);
ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
(struct scatterlist *)dma_xfer[sg_count].pio,
&dma_xfer[sg_count].sg, 1,
dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
kfree(dma_xfer);
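The spi-mxs hits allocate one { pio[4]; sg } pair per scatter-gather segment: the PIO words carry the SSP command (ctrl0, plus the segment length in pio[3]), and the data lands in a single-entry scatterlist built with sg_set_page() or sg_init_one(). Casting the pio array to a scatterlist pointer is the mxs DMA engine's convention for submitting command words. A condensed sketch of the two-step prep; ssp, ctrl0, sg_count, and dir come from the surrounding function, and the word count and flag choices are my reading of the driver:

struct dma_async_tx_descriptor *desc;

/* Step 1: hand the PIO command words to the DMA engine.
 * DMA_TRANS_NONE marks them as command words, not data.
 */
desc = dmaengine_prep_slave_sg(ssp->dmach,
                (struct scatterlist *)dma_xfer[sg_count].pio,
                4, DMA_TRANS_NONE, 0);
if (!desc)
        goto err;

/* Step 2: queue the mapped data segment behind it. */
desc = dmaengine_prep_slave_sg(ssp->dmach,
                &dma_xfer[sg_count].sg, 1,
                dir, DMA_PREP_INTERRUPT);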
void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer);
u8 dma_xfer:1; /* PIO:0 or DMA:1 */
dma_xfer(dma_engine, data, run_size_words*sizeof(u32));
err = dma_xfer(AICA_DMA_CHANNEL,