mdp4_crtc
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
event = mdp4_crtc->event;
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
struct mdp4_crtc *mdp4_crtc =
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
int i, ovlp = mdp4_crtc->ovlp;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
int ovlp = mdp4_crtc->ovlp;
mdp4_crtc->name, DRM_MODE_ARG(mode));
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s", mdp4_crtc->name);
if (WARN_ON(!mdp4_crtc->enabled))
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
WARN_ON(mdp4_crtc->event);
mdp4_crtc->enabled = false;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s", mdp4_crtc->name);
if (WARN_ON(mdp4_crtc->enabled))
mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_crtc->enabled = true;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: check", mdp4_crtc->name);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: begin", mdp4_crtc->name);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);
mdp4_crtc->event = crtc->state->event;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint64_t iova = mdp4_crtc->cursor.next_iova;
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
old_bo = mdp4_crtc->cursor.next_bo;
mdp4_crtc->cursor.next_bo = cursor_bo;
mdp4_crtc->cursor.next_iova = iova;
mdp4_crtc->cursor.width = width;
mdp4_crtc->cursor.height = height;
mdp4_crtc->cursor.stale = true;
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
mdp4_crtc->cursor.x = x;
mdp4_crtc->cursor.y = y;
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
struct drm_crtc *crtc = &mdp4_crtc->base;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
pending = atomic_xchg(&mdp4_crtc->pending, 0);
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base;
DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
mdp4_crtc->flushed_mask),
dev_warn(dev->dev, "vblank time out, crtc=%s\n", mdp4_crtc->base.name);
mdp4_crtc->flushed_mask = 0;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
return mdp4_crtc->vblank.irqmask;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
switch (mdp4_crtc->dma) {
mdp4_crtc->mixer = mixer;
/* Upcast: recover the driver-private struct mdp4_crtc from the embedded
 * struct drm_crtc ('base' member) it wraps. */
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
struct mdp4_crtc *mdp4_crtc = ptr;
drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
struct mdp4_crtc *mdp4_crtc;
mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base,
if (IS_ERR(mdp4_crtc))
return ERR_CAST(mdp4_crtc);
crtc = &mdp4_crtc->base;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
mdp4_crtc->err.irq = mdp4_crtc_err_irq;
snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
spin_lock_init(&mdp4_crtc->cursor.lock);
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
atomic_or(pending, &mdp4_crtc->pending);
mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
flush |= ovlp2flush(mdp4_crtc->ovlp);
DBG("%s: flush=%08x", mdp4_crtc->name, flush);
mdp4_crtc->flushed_mask = flush;