/* drivers/gpu/drm/omapdrm/omap_dmm_tiler.c (excerpt) */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/spinlock.h>
/* ... other includes elided ... */
/* per-device context; only the fields used in this excerpt are shown */
struct dmm {
	struct device *dev;
	dma_addr_t phys_base;		/* physical base of the DMM registers */
	void __iomem *base;		/* ioremapped DMM registers */
	bool dmm_workaround;		/* route register access through sDMA */
	spinlock_t wa_lock;		/* serializes proxy DMA transfers */
	u32 *wa_dma_data;		/* coherent bounce buffer, one register */
	dma_addr_t wa_dma_handle;	/* DMA address of the bounce buffer */
	struct dma_chan *wa_dma_chan;	/* memcpy-capable DMA channel */
	/* ... refill-engine bookkeeping fields elided ... */
};

/* each struct refill_engine carries a "struct dmm *dmm" backpointer */

static struct dmm *omap_dmm;
/* forward declaration; the definition is at the end of this excerpt */
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst);
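/*
 * DRA7xx errata i878: the MPU should not access RAM and the DMM registers
 * at the same time. Since MPU accesses to RAM cannot be prevented, DMM
 * registers are instead accessed through a proxy: a one-register sDMA
 * memcpy between the register and a coherent bounce buffer.
 */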
static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
{
	dma_addr_t src = dmm->phys_base + reg;
	dma_addr_t dst = dmm->wa_dma_handle;
	int r;

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		/* fall back to a direct MMIO read if the sDMA copy failed */
		dev_err(dmm->dev, "sDMA read transfer timeout\n");
		return readl(dmm->base + reg);
	}
	/* ensure the read below is not reordered before the DMA completed */
	rmb();
	return readl((void __iomem *)dmm->wa_dma_data);
}
static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
{
	dma_addr_t src = dmm->wa_dma_handle;
	dma_addr_t dst = dmm->phys_base + reg;
	int r;

	writel(val, (void __iomem *)dmm->wa_dma_data);
	/* make sure the value is in memory before the DMA copies it out */
	wmb();

	r = dmm_dma_copy(dmm, src, dst);
	if (r) {
		/* fall back to a direct MMIO write if the sDMA copy failed */
		dev_err(dmm->dev, "sDMA write transfer timeout\n");
		writel(val, dmm->base + reg);
	}
}
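/*
 * dmm_read()/dmm_write() below are also called from the PAT interrupt
 * handler, so the workaround lock must be taken with interrupts disabled.
 */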
static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	if (dmm->dmm_workaround) {
		u32 v;
		unsigned long flags;

		spin_lock_irqsave(&dmm->wa_lock, flags);
		v = dmm_read_wa(dmm, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);
		return v;
	}
	return readl(dmm->base + reg);
}
static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	if (dmm->dmm_workaround) {
		unsigned long flags;

		spin_lock_irqsave(&dmm->wa_lock, flags);
		dmm_write_wa(dmm, val, reg);
		spin_unlock_irqrestore(&dmm->wa_lock, flags);
		return;
	}
	writel(val, dmm->base + reg);
}
static int dmm_workaround_init(struct dmm *dmm)
{
	dma_cap_mask_t mask;

	spin_lock_init(&dmm->wa_lock);

	dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
					      &dmm->wa_dma_handle, GFP_KERNEL);
	if (!dmm->wa_dma_data)
		return -ENOMEM;

	/* any memcpy-capable channel will do for the proxy transfers */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dmm->wa_dma_chan) {
		dma_free_coherent(dmm->dev, sizeof(u32), dmm->wa_dma_data,
				  dmm->wa_dma_handle);
		return -ENODEV;
	}
	return 0;
}
static void dmm_workaround_uninit(struct dmm *dmm)
{
	dma_release_channel(dmm->wa_dma_chan);
	dma_free_coherent(dmm->dev, sizeof(u32), dmm->wa_dma_data,
			  dmm->wa_dma_handle);
}
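/*
 * Probe-side enablement, a minimal sketch: the proxy is only needed on
 * SoCs affected by errata i878 (the DRA7 family), so it is enabled
 * conditionally and the driver falls back to plain MMIO elsewhere.
 */
	dmm->dmm_workaround = false;
	if (of_machine_is_compatible("ti,dra7")) {
		if (!dmm_workaround_init(dmm))
			dmm->dmm_workaround = true;
		else
			dev_warn(dmm->dev,
				 "failed to initialize i878 workaround\n");
	}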
static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
	struct dmm *dmm = engine->dmm;
	u32 r, i = DMM_FIXED_RETRY_COUNT;

	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		if (r & DMM_PATSTATUS_ERR) {
			dev_err(dmm->dev,
				"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -EFAULT;
		}
		if ((r & wait_mask) == wait_mask)
			return 0;
		if (--i == 0) {
			dev_err(dmm->dev,
				"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -ETIMEDOUT;
		}
		udelay(1);
	}
}
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

	/* each engine owns one status byte; the lowest byte is engine 0 */
	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);
			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}
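/*
 * PAT refills are submitted as transactions: dmm_txn_init() claims an idle
 * refill engine, dmm_txn_append() adds PAT descriptors to its buffer, and
 * dmm_txn_commit() kicks the hardware and optionally waits for completion.
 */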
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct refill_engine *engine = NULL;
	unsigned long flags;

	/* ... wait until an engine is available ... */

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
				    idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	/* ... set up and return engine->txn ... */
}
/* from dmm_txn_append(): unused PAT slots fall back to the dummy page */
	data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;
	int ret = 0;

	if (!txn->last_pat) {
		dev_err(dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}
	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
	/* ... wait for engine ready, set engine->async, reinit completion ... */

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait && !wait_for_completion_timeout(&engine->compl,
						 msecs_to_jiffies(100))) {
		dev_err(dmm->dev, "timed out waiting for done\n");
		ret = -ETIMEDOUT;
	}

cleanup:
	/* ... release the engine when running synchronously ... */
	return ret;
}
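/*
 * Typical use of the transaction API, a minimal sketch: the variables
 * (area, p_area, pages, npages, roll, wait, ret) come from the enclosing
 * helper (the driver's fill() walks container slices; elided here).
 */
	struct dmm_txn *txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;
	dmm_txn_append(txn, &p_area, pages, npages, roll);
	ret = dmm_txn_commit(txn, wait);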
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_status status;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
	if (!tx) {
		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	/* busy-wait for the copy; these transfers are tiny and rare */
	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
	if (status != DMA_COMPLETE)
		dev_err(dmm->dev, "i878 wa DMA copy failure\n");

	dmaengine_terminate_all(dmm->wa_dma_chan);
	return 0;
}

/* from omap_dmm_probe(): each refill engine keeps a backpointer */
	omap_dmm->engines[i].dmm = omap_dmm;
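/*
 * Design note: the proxy copy is fully synchronous. dma_sync_wait() polls
 * the channel until the cookie completes, and dmaengine_terminate_all()
 * then returns the channel to a clean state, so every register access is
 * self-contained and safe to issue under the wa_lock spinlock.
 */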