stm_timer
/*
 * nxp_stm_clocksource_setcnt() - load a value into the STM free-running counter
 * @stm_timer: STM timer instance
 * @cnt: counter value to write to the CNT register
 *
 * Used by the clocksource resume path to restore the counter value that
 * was saved at suspend time.
 */
static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
{
	writel(cnt, STM_CNT(stm_timer->base));
}
/* Body of the clocksource ->read() callback (the function signature is not
 * visible in this chunk): widen the current 32-bit STM counter to u64. */
struct stm_timer *stm_timer = cs_to_stm(cs);
return (u64)nxp_stm_clocksource_getcnt(stm_timer);
/*
 * nxp_stm_module_enable() - turn the shared STM module on.
 * @stm_timer: STM timer instance
 *
 * NOTE(review): this body is incomplete in this chunk — the declaration of
 * 'reg' and the statement that sets the enable bit between the readl() and
 * the writel() are missing; as written this read-modify-write writes back
 * the unmodified value. Confirm against the upstream driver.
 */
static void nxp_stm_module_enable(struct stm_timer *stm_timer)
reg = readl(STM_CR(stm_timer->base));
writel(reg, STM_CR(stm_timer->base));
/*
 * nxp_stm_module_disable() - turn the shared STM module off.
 * @stm_timer: STM timer instance
 *
 * NOTE(review): this body is incomplete in this chunk — the declaration of
 * 'reg' and the statement that clears the enable bit between the readl()
 * and the writel() are missing. Confirm against the upstream driver.
 */
static void nxp_stm_module_disable(struct stm_timer *stm_timer)
reg = readl(STM_CR(stm_timer->base));
writel(reg, STM_CR(stm_timer->base));
static void nxp_stm_module_put(struct stm_timer *stm_timer)
if (atomic_dec_and_test(&stm_timer->refcnt))
nxp_stm_module_disable(stm_timer);
static void nxp_stm_module_get(struct stm_timer *stm_timer)
if (atomic_inc_return(&stm_timer->refcnt) == 1)
nxp_stm_module_enable(stm_timer);
/* Body of the clocksource ->enable() callback (signature not visible in
 * this chunk): take a reference so the shared STM module is enabled. */
struct stm_timer *stm_timer = cs_to_stm(cs);
nxp_stm_module_get(stm_timer);
/* Body of the clocksource ->disable() callback (signature not visible in
 * this chunk): drop the module reference taken by ->enable(). */
struct stm_timer *stm_timer = cs_to_stm(cs);
nxp_stm_module_put(stm_timer);
/* Body of the clocksource ->suspend() callback (signature not visible):
 * snapshot the current counter so it can be restored on resume. */
struct stm_timer *stm_timer = cs_to_stm(cs);
stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
/* Body of the clocksource ->resume() callback (signature not visible):
 * restore the counter value saved at suspend time. */
struct stm_timer *stm_timer = cs_to_stm(cs);
nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
/* Body of the devm action callback (signature not visible in this chunk):
 * unregister the clocksource when the owning device goes away. */
struct stm_timer *stm_timer = data;
clocksource_unregister(&stm_timer->cs);
/*
 * nxp_stm_clocksource_init() - register one STM instance as a clocksource
 * and as the sched_clock source.
 *
 * Fills in the struct clocksource embedded in @stm_timer (32-bit mask,
 * continuous, rating 460), registers it at the clock rate, arranges
 * devm-managed unregistration, and finally registers the same counter as
 * the sched_clock source.
 *
 * NOTE(review): this chunk is an incomplete extraction — the rest of the
 * parameter list, the function braces, the checks on both 'ret' values and
 * the return statements are missing. Compare against the upstream driver
 * before relying on this text.
 */
static int nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
stm_timer->base = base;
stm_timer->rate = clk_get_rate(clk);
stm_timer->cs.name = name;
stm_timer->cs.rating = 460;
stm_timer->cs.read = nxp_stm_clocksource_read;
stm_timer->cs.enable = nxp_stm_clocksource_enable;
stm_timer->cs.disable = nxp_stm_clocksource_disable;
stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
stm_timer->cs.resume = nxp_stm_clocksource_resume;
stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
stm_timer->cs.owner = THIS_MODULE;
ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
/* Ensure the clocksource is unregistered automatically on device teardown. */
ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
/* Expose this instance to the sched_clock read callback. */
stm_sched_clock = stm_timer;
sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);
static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
return readl(STM_CNT(stm_timer->base));
static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
writel(0, STM_CCR0(stm_timer->base));
static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
/* Body of the clock event ->set_state_shutdown() callback (signature not
 * visible in this chunk): stop channel 0 so no further events fire. */
struct stm_timer *stm_timer = ced_to_stm(ced);
nxp_stm_clockevent_disable(stm_timer);
/* Body of the clock event ->set_next_event() callback (signature, locals
 * and return statements are not visible in this chunk). The channel is
 * disabled while CMP0 is reprogrammed to "current counter + delta", then
 * re-enabled. @delta is cached for re-arming in periodic mode.
 *
 * NOTE(review): the comparison on the re-read counter below appears to be
 * a race/rollover check, but its body is missing from this extraction —
 * confirm the intended handling against the upstream driver. */
struct stm_timer *stm_timer = ced_to_stm(ced);
nxp_stm_clockevent_disable(stm_timer);
stm_timer->delta = delta;
val = nxp_stm_clockevent_read_counter(stm_timer) + delta;
writel(val, STM_CMP0(stm_timer->base));
if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
nxp_stm_clockevent_enable(stm_timer);
/* Body of the clock event ->set_state_periodic() callback (signature not
 * visible): arm a 1s-rate event; periodic re-arming is done in the
 * interrupt handler using the cached delta. */
struct stm_timer *stm_timer = ced_to_stm(ced);
return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
/* Body of the clock event ->suspend() callback (signature not visible):
 * drop the reference on the shared STM module. */
struct stm_timer *stm_timer = ced_to_stm(ced);
nxp_stm_module_put(stm_timer);
/* Body of the clock event ->resume() callback (signature not visible):
 * take a reference so the shared STM module is re-enabled. */
struct stm_timer *stm_timer = ced_to_stm(ced);
nxp_stm_module_get(stm_timer);
/*
 * nxp_stm_clockevent_per_cpu_init() - set up one STM instance as the
 * per-CPU clock event device for @cpu.
 *
 * Fills in the struct clock_event_device embedded in @stm_timer (oneshot
 * and periodic, rating 460, pinned to @cpu), stashes the instance in the
 * per-CPU pointer for the hotplug callback, and takes a module reference
 * so the timer block is enabled.
 *
 * NOTE(review): this chunk is an incomplete extraction — the rest of the
 * parameter list, the function braces and the return statement are
 * missing. Confirm against the upstream driver.
 */
static int nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
stm_timer->base = base;
stm_timer->rate = clk_get_rate(clk);
stm_timer->ced.name = name;
stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
stm_timer->ced.resume = nxp_stm_clockevent_resume;
stm_timer->ced.cpumask = cpumask_of(cpu);
stm_timer->ced.rating = 460;
stm_timer->ced.irq = irq;
stm_timer->ced.owner = THIS_MODULE;
/* Make the instance reachable from the CPU-hotplug starting callback. */
per_cpu(stm_timers, cpu) = stm_timer;
nxp_stm_module_get(stm_timer);
/* Body of the CPU-hotplug "starting" callback (signature not visible in
 * this chunk): look up the per-CPU STM instance, force the timer IRQ onto
 * the incoming CPU, and register the clock event device. The minimum
 * delta of (rate / MICRO) * 2 corresponds to 2us at the timer rate.
 * NOTE(review): the error handling after irq_force_affinity() and the
 * return statements are missing from this extraction. */
struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
if (WARN_ON(!stm_timer))
ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
(stm_timer->rate / MICRO) * 2, ULONG_MAX);
/* Body of the timer interrupt handler (signature and surrounding control
 * flow not visible in this chunk): acknowledge the compare interrupt,
 * then either re-arm CMP0 with the cached delta (periodic operation) or
 * disable the channel (oneshot expiry).
 * NOTE(review): the branch selecting between re-arm and disable, the
 * event_handler() invocation and the IRQ_HANDLED return are missing from
 * this extraction — confirm against the upstream driver. */
struct stm_timer *stm_timer = dev_id;
struct clock_event_device *ced = &stm_timer->ced;
/* Clear the channel interrupt flag before re-arming. */
writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));
val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;
writel(val, STM_CMP0(stm_timer->base));
nxp_stm_clockevent_disable(stm_timer);
/* Body of the probe function (signature and most control flow not visible
 * in this chunk): allocate the device-managed stm_timer, request the
 * timer IRQ, then initialize the instance either as a clocksource or as a
 * per-CPU clock event device.
 * NOTE(review): the devm_request_irq() call this flags line belongs to,
 * the error checks and the role-selection logic are missing from this
 * extraction. */
struct stm_timer *stm_timer;
stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
if (!stm_timer)
IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
/* Per-CPU clock event instance, set by nxp_stm_clockevent_per_cpu_init()
 * and consumed by the CPU-hotplug starting callback. */
static DEFINE_PER_CPU(struct stm_timer *, stm_timers);
/* The instance registered as the sched_clock source. */
static struct stm_timer *stm_sched_clock;
static struct stm_timer *cs_to_stm(struct clocksource *cs)
return container_of(cs, struct stm_timer, cs);
static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
return container_of(ced, struct stm_timer, ced);
static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
return readl(STM_CNT(stm_timer->base));