/*
 * Fragment: polling loop that waits for a status flag to become SET.
 * Reads the register at @offset into *status each iteration and exits
 * once (status & flag) is non-zero.
 * NOTE(review): the loop body, timeout counter and delay are not visible
 * in this view -- termination on timeout is presumed but unconfirmed.
 */
hdq_data
while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
/*
 * hdq_reset_irqstatus - atomically snapshot and clear cached IRQ status bits
 * @hdq_data: driver state
 * @bits:     mask of interrupt-status bits to clear from the cache
 *
 * Takes the spinlock to serialise against the ISR (which ORs new hardware
 * status into hdq_irqstatus), captures the current cached value, then clears
 * the requested bits.
 * NOTE(review): the return statement is not visible here -- presumably
 * "return status;" (the pre-clear snapshot); confirm against full source.
 */
static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
/* snapshot cached status before clearing the requested bits */
status = hdq_data->hdq_irqstatus;
hdq_data->hdq_irqstatus &= ~bits;
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
/*
 * hdq_write_byte - transmit one byte on the HDQ bus
 * @hdq_data: driver state
 * @val:      byte to transmit
 * @status:   out: interrupt status observed for the transfer
 *
 * Serialised by hdq_mutex (interruptible lock, so a signal can abort).
 * Writes @val to the TX data register, sets the GO bit to start the
 * transfer, then waits for TXCOMPLETE in the cached IRQ status before
 * clearing it via hdq_reset_irqstatus().
 * NOTE(review): error paths, the wait primitive between these lines, and
 * the return value are not visible in this fragment.
 */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
/* stale status here indicates a previous transfer was not fully consumed */
if (hdq_data->hdq_irqstatus)
dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
hdq_data->hdq_irqstatus);
/* load the byte, then kick the transfer by setting the GO bit */
hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
/* wait condition: TXCOMPLETE latched into the cached IRQ status by the ISR */
(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
dev_dbg(hdq_data->dev, "timeout waiting for"
/* also wait for the controller to drop the GO bit after the transfer */
ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
dev_dbg(hdq_data->dev, "timeout waiting GO bit"
mutex_unlock(&hdq_data->hdq_mutex);
/*
 * Fragment: HDQ interrupt handler body.
 * Reads the hardware interrupt status register (read clears it in HW,
 * per the accesses elsewhere in this file) and ORs it into the software
 * cache under the spinlock, so waiters never lose a completion bit.
 * NOTE(review): the wake_up call and IRQ_HANDLED return are not visible
 * in this fragment -- presumed from the truncated "if" at the end.
 */
struct hdq_data *hdq_data = _hdq;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
/* accumulate, don't overwrite: a waiter may not have consumed earlier bits */
hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
if (hdq_data->hdq_irqstatus &
/*
 * omap_hdq_break - issue a break (reset) pulse on the HDQ bus
 * @hdq_data: driver state
 *
 * Under hdq_mutex: starts the break via the control/status register, waits
 * for the TIMEOUT interrupt bit that signals break completion, consumes it
 * with hdq_reset_irqstatus(), then checks the presence bit and waits for
 * the INIT/GO bits to clear.
 * NOTE(review): the exact merge mask on the hdq_reg_merge() call and the
 * error/return paths are outside this fragmentary view.
 */
static int omap_hdq_break(struct hdq_data *hdq_data)
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
/* leftover status means an earlier operation's completion was never consumed */
if (hdq_data->hdq_irqstatus)
dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
hdq_data->hdq_irqstatus);
/* start the break pulse */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
/* wait condition: TIMEOUT bit signals break-sequence completion */
(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
dev_dbg(hdq_data->dev, "break wait elapsed\n");
dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
/* after the break, a present slave should have asserted the presence bit */
if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
dev_dbg(hdq_data->dev, "Presence bit not set\n");
/* finally wait for the controller's INIT/GO bits to clear */
ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
mutex_unlock(&hdq_data->hdq_mutex);
/*
 * hdq_read_byte - receive one byte from the HDQ bus
 * @hdq_data: driver state
 * @val:      out: byte read from the RX data register
 *
 * Under hdq_mutex: if RXCOMPLETE is not already cached, starts a read by
 * setting the GO (and direction) bits, waits for RXCOMPLETE, consumes the
 * status bit, then fetches the byte from OMAP_HDQ_RX_DATA.
 * NOTE(review): the pm_runtime_suspended() early-exit body and the wait
 * primitive between these lines are not visible in this fragment.
 */
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
/* device must be powered for a bus transaction -- bail out if suspended */
if (pm_runtime_suspended(hdq_data->dev)) {
/* only kick a new read if a completion is not already pending */
if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
(hdq_data->hdq_irqstatus
status = hdq_reset_irqstatus(hdq_data,
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
dev_dbg(hdq_data->dev, "timeout waiting for"
/* consume the completion bit before reading the data register */
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
mutex_unlock(&hdq_data->hdq_mutex);
/*
 * Fragment: w1 "triplet" callback body (read bit, read complement,
 * write direction bit -- used during 1-Wire ROM search).
 * Takes a runtime-PM reference and the mutex, then performs two RX waits
 * (each consuming RXCOMPLETE) followed by one TX wait (consuming
 * TXCOMPLETE), releasing the mutex and dropping the PM reference at the end.
 * NOTE(review): the bit-level register writes between the three wait
 * sections are not visible in this fragmentary view.
 */
struct hdq_data *hdq_data = _hdq;
err = pm_runtime_get_sync(hdq_data->dev);
/* get_sync failed: drop the usage count it still took */
pm_runtime_put_noidle(hdq_data->dev);
err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
/* first bit read: wait for RXCOMPLETE, then consume it */
(hdq_data->hdq_irqstatus
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
/* second (complement) bit read: same wait-and-consume sequence */
(hdq_data->hdq_irqstatus
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
/* direction bit write: wait for TXCOMPLETE, then consume it */
(hdq_data->hdq_irqstatus
hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
mutex_unlock(&hdq_data->hdq_mutex);
/* allow the device to autosuspend once the transaction is done */
pm_runtime_put_autosuspend(hdq_data->dev);
/*
 * Fragment: w1 reset_bus callback body.
 * Takes a runtime-PM reference, issues a bus break/reset pulse via
 * omap_hdq_break(), then drops the reference with autosuspend.
 */
struct hdq_data *hdq_data = _hdq;
err = pm_runtime_get_sync(hdq_data->dev);
/* get_sync failed: drop the usage count it still took */
pm_runtime_put_noidle(hdq_data->dev);
omap_hdq_break(hdq_data);
pm_runtime_put_autosuspend(hdq_data->dev);
/*
 * Fragment: w1 read_byte callback body.
 * Takes a runtime-PM reference, reads one byte via hdq_read_byte(),
 * then drops the reference with autosuspend.
 * NOTE(review): the return value on error paths is not visible here.
 */
struct hdq_data *hdq_data = _hdq;
ret = pm_runtime_get_sync(hdq_data->dev);
/* get_sync failed: drop the usage count it still took */
pm_runtime_put_noidle(hdq_data->dev);
ret = hdq_read_byte(hdq_data, &val);
pm_runtime_put_autosuspend(hdq_data->dev);
/*
 * Fragment: w1 write_byte callback body.
 * Takes a runtime-PM reference; issues a break first (presumably only for
 * the W1_SKIP_ROM command -- the guarding condition is not visible here),
 * writes the byte via hdq_write_byte(), logs the controller status on TX
 * failure, then drops the PM reference with autosuspend.
 */
struct hdq_data *hdq_data = _hdq;
ret = pm_runtime_get_sync(hdq_data->dev);
/* get_sync failed: drop the usage count it still took */
pm_runtime_put_noidle(hdq_data->dev);
omap_hdq_break(hdq_data);
ret = hdq_write_byte(hdq_data, byte, &status);
dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
pm_runtime_put_autosuspend(hdq_data->dev);
/*
 * Fragment: runtime-PM suspend callback body.
 * Writes the stored mode to register offset 0 (control/status at base) and
 * reads the interrupt status register -- presumably the read clears any
 * pending interrupt status in hardware; confirm against the TRM.
 */
struct hdq_data *hdq_data = dev_get_drvdata(dev);
hdq_reg_out(hdq_data, 0, hdq_data->mode);
hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
/*
 * Fragment: runtime-PM resume callback body.
 * Reprograms the control/status register (including the stored bus mode)
 * and reads the interrupt status register -- presumably to clear stale
 * status left over from before suspend; confirm against the TRM.
 */
struct hdq_data *hdq_data = dev_get_drvdata(dev);
hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
hdq_data->mode);
hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
/*
 * Fragment: platform-driver probe body.
 * Allocates driver state (devm-managed, freed automatically on detach),
 * maps the controller's MMIO region, initialises the mutex/spinlock,
 * selects the bus mode (0/1 -- presumably HDQ vs 1-Wire, chosen by a DT
 * property not visible here), reads the hardware revision, and requests
 * the interrupt with hdq_isr as handler.
 * NOTE(review): error-return statements between these lines are not
 * visible in this fragmentary view.
 */
struct hdq_data *hdq_data;
hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
if (!hdq_data)
hdq_data->dev = dev;
platform_set_drvdata(pdev, hdq_data);
/* map the controller registers; devm_ handles unmapping on failure/detach */
hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
mutex_init(&hdq_data->hdq_mutex);
/* mode 0 or 1 -- selection condition not visible in this fragment */
hdq_data->mode = 0;
hdq_data->mode = 1;
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
spin_lock_init(&hdq_data->hdq_spinlock);
ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
/*
 * Fragment: MMIO register accessor helpers, interleaved with two stray
 * probe-path lines (the omap_hdq_break() call and the w1 master data
 * assignment belong to the probe tail, not to these helpers -- this view
 * has lines from different functions mixed together).
 */
/* read a controller register at byte @offset from the mapped base */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
/* stray probe lines: initial bus reset, then hook state into the w1 core */
omap_hdq_break(hdq_data);
omap_w1_master.data = hdq_data;
return __raw_readl(hdq_data->hdq_base + offset);
/* write @val to the controller register at byte @offset */
static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
__raw_writel(val, hdq_data->hdq_base + offset);
/* read-modify-write: clear @mask bits, OR in the new value, write back */
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
__raw_writel(new_val, hdq_data->hdq_base + offset);
/*
 * hdq_wait_for_flag - poll a controller register until @flag matches the
 * requested state (fragment: only the header and the wait-for-CLEAR loop
 * are visible; the timeout handling and return value are not shown here).
 * This variant loops while (status & flag) is still set.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
while (((*status = hdq_reg_in(hdq_data, offset)) & flag)