/* NOTE(review): stray token below looks like extraction residue, not code — confirm. */
dw_wdt
/* Recover the driver-private struct dw_wdt from its embedded watchdog_device. */
#define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
/*
 * Return non-zero when the watchdog enable bit is set in the control register.
 * NOTE(review): body is truncated in this view — the braces and the mask
 * operand of the '&' are missing; confirm against the full source.
 */
static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
return readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET) &
/*
 * Write the requested response mode into the control register and cache it
 * in dw_wdt->rmod for later scaling of timeouts.
 * NOTE(review): the lines that set/clear the mode bit in 'val' between the
 * readl() and writel() are missing from this view.
 */
static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod)
val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
/* Cache the active mode so other paths can scale seconds by it. */
dw_wdt->rmod = rmod;
/*
 * Scan the precomputed timeouts[] table for the smallest entry whose period
 * in seconds satisfies the requested timeout; report its hardware TOP value
 * through *top_val and return the achieved period.
 * NOTE(review): loop header, remaining parameters and the no-match fallback
 * are not visible here — confirm bounds handling against the full source.
 */
static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt,
if (dw_wdt->timeouts[idx].sec >= timeout)
*top_val = dw_wdt->timeouts[idx].top_val;
return dw_wdt->timeouts[idx].sec;
/*
 * Return the shortest non-zero timeout (in seconds) supported by the
 * hardware, taken from the sorted timeouts[] table.
 * NOTE(review): the loop over idx is missing from this view.
 */
static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt)
if (dw_wdt->timeouts[idx].sec)
return dw_wdt->timeouts[idx].sec;
/*
 * Return the largest supported timeout in milliseconds, derived from the
 * last (largest) entry of the timeouts[] table.
 * NOTE(review): the ms conversion and return statement are truncated here.
 */
static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt)
struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1];
/*
 * Read back the currently programmed timeout: extract the 4-bit TOP field
 * from the timeout-range register, map it to seconds via timeouts[], and
 * scale by rmod (presumably 2x in IRQ-then-reset mode — confirm enum values).
 */
static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt)
int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
if (dw_wdt->timeouts[idx].top_val == top_val)
return dw_wdt->timeouts[idx].sec * dw_wdt->rmod;
/*
 * NOTE(review): body fragment of the watchdog ping/kick callback — its
 * signature is not visible in this view. Writing the magic kick value to
 * the counter-restart register reloads the countdown.
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt->regs +
/*
 * NOTE(review): body fragment of the set-timeout callback — signature not
 * visible. Picks the best hardware TOP for the requested seconds (halving
 * the target when IRQ mode doubles the effective window — confirm), then
 * records the achieved timeout scaled back by rmod.
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod),
if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
wdd->timeout = timeout * dw_wdt->rmod;
/*
 * NOTE(review): body fragment of the set-pretimeout callback — signature
 * not visible. A non-zero request ('req') switches the hardware into
 * IRQ-first response mode; zero reverts to straight reset.
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET);
/*
 * Enable the watchdog so that expiry causes a system reset (or an IRQ first
 * when in IRQ mode). NOTE(review): the lines that modify 'val' between the
 * read and the write, including the IRQ-mode branch body, are missing here.
 */
static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
/*
 * NOTE(review): body fragment of the start callback — signature not
 * visible. Kicks the counter once, then arms the reset path.
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
dw_wdt_ping(&dw_wdt->wdd);
dw_wdt_arm_system_reset(dw_wdt);
/*
 * NOTE(review): body fragment of the stop callback — signature not visible.
 * This IP has no disable bit, so stopping is done by pulsing the reset line;
 * without a reset control (the !rst branch, body missing here) the watchdog
 * presumably stays running — confirm against the full source.
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
if (!dw_wdt->rst) {
reset_control_assert(dw_wdt->rst);
reset_control_deassert(dw_wdt->rst);
/*
 * NOTE(review): body fragment of the restart handler — signature not
 * visible. Programs the shortest timeout (TOP = 0), forces reset mode,
 * and either kicks the already-running counter or arms it, so the chip
 * reboots almost immediately.
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
if (dw_wdt_is_enabled(dw_wdt))
dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
dw_wdt_arm_system_reset(dw_wdt);
/*
 * NOTE(review): body fragment of the get-timeleft callback — signature not
 * visible. Converts the current counter value to seconds using the clock
 * rate; in IRQ mode it additionally consults the interrupt status register
 * (presumably to account for the second, pre-reset countdown phase — confirm).
 */
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET);
sec = val / dw_wdt->rate;
if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) {
val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
/*
 * NOTE(review): body fragment of the pretimeout interrupt handler —
 * signature not visible. Reads the interrupt status and forwards the
 * pretimeout event to the watchdog core.
 */
struct dw_wdt *dw_wdt = devid;
val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
watchdog_notify_pretimeout(&dw_wdt->wdd);
/*
 * NOTE(review): body fragment of the PM suspend hook — signature not
 * visible. Saves the control and timeout-range registers (restored on
 * resume), then gates both clocks.
 */
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
clk_disable_unprepare(dw_wdt->pclk);
clk_disable_unprepare(dw_wdt->clk);
/*
 * NOTE(review): body fragment of the PM resume hook — signature and error
 * checks around the clk calls are not visible. Re-enables both clocks
 * (unwinding 'clk' if 'pclk' fails), restores the saved registers, and
 * kicks the counter so it restarts from a full period.
 */
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
int err = clk_prepare_enable(dw_wdt->clk);
err = clk_prepare_enable(dw_wdt->pclk);
clk_disable_unprepare(dw_wdt->clk);
writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
dw_wdt_ping(&dw_wdt->wdd);
/*
 * Build the timeouts[] table from the raw TOP counter values in 'tops':
 * convert each count to seconds (and sub-second milliseconds via do_div)
 * using the timer clock rate.
 * NOTE(review): the surrounding loop, the msec computation, and what 'dst'
 * vs direct assignment distinguishes (possibly insertion-sorting the table)
 * are missing from this view — confirm before relying on ordering.
 */
static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops)
tout.sec = tops[val] / dw_wdt->rate;
do_div(msec, dw_wdt->rate);
dst = &dw_wdt->timeouts[tidx];
dw_wdt->timeouts[val] = tout;
/*
 * Populate the supported-timeouts table: read component parameters register 1
 * (presumably to detect fixed vs. user-defined TOP values — confirm), hand
 * the chosen TOP array to dw_wdt_handle_tops(), and fail if even the largest
 * resulting timeout is zero (clock too fast / invalid configuration).
 * NOTE(review): TOP-array selection logic and the error return are missing
 * from this view.
 */
static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev)
data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET);
dw_wdt_handle_tops(dw_wdt, tops);
if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) {
/*
 * Create a per-device debugfs directory exposing the register file as a
 * read-only "registers" regset.
 * NOTE(review): allocation of 'regset' and its regs/nregs setup are missing
 * from this view.
 */
static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt)
struct device *dev = dw_wdt->wdd.parent;
regset->base = dw_wdt->regs;
dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset);
/* Tear down the debugfs directory created by dw_wdt_dbgfs_init(). */
static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt)
debugfs_remove_recursive(dw_wdt->dbgfs_dir);
/* No-op stubs used when debugfs support is compiled out (presumably under
 * an #ifdef not visible in this view — confirm). */
static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {}
static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {}
/*
 * NOTE(review): body fragment of the platform probe — signature, error
 * checks, IRQ acquisition and watchdog registration lines are missing from
 * this view. Visible flow: allocate state, map registers, grab the timer
 * clock ("tclk", falling back to an unnamed clock), require a non-zero
 * clock rate, take an optional "pclk" bus clock and an optional shared
 * reset, default to reset response mode, discover supported timeouts, and
 * register with the watchdog core — adopting the running configuration if
 * the watchdog was already enabled (e.g. by the bootloader).
 */
struct dw_wdt *dw_wdt;
dw_wdt = devm_kzalloc(dev, sizeof(*dw_wdt), GFP_KERNEL);
if (!dw_wdt)
dw_wdt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dw_wdt->regs))
return PTR_ERR(dw_wdt->regs);
/* Prefer the named timer clock; fall back to the device's default clock. */
dw_wdt->clk = devm_clk_get_enabled(dev, "tclk");
if (IS_ERR(dw_wdt->clk)) {
dw_wdt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(dw_wdt->clk))
return PTR_ERR(dw_wdt->clk);
/* The counter decrements at this rate; a zero rate makes timeouts meaningless. */
dw_wdt->rate = clk_get_rate(dw_wdt->clk);
if (dw_wdt->rate == 0)
dw_wdt->pclk = devm_clk_get_optional_enabled(dev, "pclk");
if (IS_ERR(dw_wdt->pclk))
return PTR_ERR(dw_wdt->pclk);
dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
if (IS_ERR(dw_wdt->rst))
return PTR_ERR(dw_wdt->rst);
dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
pdev->name, dw_wdt);
/* Pretimeout-capable ident when an IRQ is available; plain ident otherwise
 * — the selecting branch is not visible here. */
dw_wdt->wdd.info = &dw_wdt_pt_ident;
dw_wdt->wdd.info = &dw_wdt_ident;
/* Release the watchdog from reset before probing its parameters. */
reset_control_deassert(dw_wdt->rst);
ret = dw_wdt_init_timeouts(dw_wdt, dev);
wdd = &dw_wdt->wdd;
wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt);
wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt);
watchdog_set_drvdata(wdd, dw_wdt);
/* If firmware left the watchdog running, inherit its current timeout. */
if (dw_wdt_is_enabled(dw_wdt)) {
wdd->timeout = dw_wdt_get_timeout(dw_wdt);
platform_set_drvdata(pdev, dw_wdt);
dw_wdt_dbgfs_init(dw_wdt);
/* NOTE(review): presumably an error-path assert — label not visible. */
reset_control_assert(dw_wdt->rst);
/*
 * NOTE(review): body fragment of the platform remove hook — signature not
 * visible. Unwinds probe: remove debugfs, unregister the watchdog, and put
 * the block back into reset.
 */
struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);
dw_wdt_dbgfs_clear(dw_wdt);
watchdog_unregister_device(&dw_wdt->wdd);
reset_control_assert(dw_wdt->rst);