ks_sa_rng
/*
 * NOTE(review): only interior statements of what looks like the hwrng
 * ->init callback are visible in this chunk; the signature, braces and
 * intermediate lines (e.g. the assignments to 'value') are missing.
 * Code is left byte-identical; comments below are review annotations.
 */
/* Recover the driver instance from the generic hwrng handle. */
struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
/* Functional clock rate; feeds the refill-delay computation below. */
unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk);
/*
 * Truncated call: the value/mask arguments are outside this chunk.
 * Presumably this enables the TRNG block via the SA command/status
 * register -- TODO confirm against the full source.
 */
regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
/* Clear the control register before (re)programming the generator. */
writel(0, &ks_sa_rng->reg_rng->control);
/*
 * 'value' is computed on lines not visible here; presumably startup /
 * refill cycle counts for control and config -- verify in full file.
 */
writel(value, &ks_sa_rng->reg_rng->control);
writel(value, &ks_sa_rng->reg_rng->config);
/* Zero the interrupt mask register (driver appears to poll instead). */
writel(0, &ks_sa_rng->reg_rng->intmask);
/* Read-modify-write of control; the modification step is not visible
 * in this chunk (presumably sets an enable bit -- confirm). */
value = readl(&ks_sa_rng->reg_rng->control);
writel(value, &ks_sa_rng->reg_rng->control);
/* Cache how long a refill takes at this clock rate (nanoseconds). */
ks_sa_rng->refill_delay_ns = refill_delay_ns(clk_rate);
/* Timestamp after which output should next be ready; the rest of this
 * expression continues on a line outside this chunk. */
ks_sa_rng->ready_ts = ktime_get_ns() +
/*
 * NOTE(review): interior of what looks like the hwrng ->cleanup
 * callback; signature and surrounding control flow are not visible.
 */
/* Recover the driver instance from the generic hwrng handle. */
struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
/* Stop the generator by clearing its control register. */
writel(0, &ks_sa_rng->reg_rng->control);
/*
 * Truncated call: value/mask arguments are outside this chunk.
 * Presumably this disables the TRNG block in the SA command/status
 * register, mirroring the enable done at init -- TODO confirm.
 */
regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
/*
 * NOTE(review): interior of what looks like the hwrng ->data_read
 * callback; the signature and the return statement are not visible.
 */
/* Recover the driver instance from the generic hwrng handle. */
struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
/* Pull one 64-bit random sample as two 32-bit register reads. */
data[0] = readl(&ks_sa_rng->reg_rng->output_l);
data[1] = readl(&ks_sa_rng->reg_rng->output_h);
/* Acknowledge the "data ready" condition so the engine refills. */
writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
/* Next sample will not be ready before one refill delay from now. */
ks_sa_rng->ready_ts = ktime_get_ns() + ks_sa_rng->refill_delay_ns;
/*
 * NOTE(review): interior of what looks like the hwrng ->data_present
 * callback; 'now' is computed on a line outside this chunk, and the
 * body of the if-block is truncated.
 */
/* Recover the driver instance from the generic hwrng handle. */
struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
/* Caller is willing to wait and the refill window has not elapsed. */
if (wait && now < ks_sa_rng->ready_ts) {
/* Truncated expression: remaining wait converted from ns to us
 * (div by 1000, rounded up); the assignment target and the sleep
 * call that consumes it are outside this chunk -- confirm. */
DIV_ROUND_UP((u32)(ks_sa_rng->ready_ts - now), 1000);
/* Poll the hardware status register for data availability. */
ready = readl(&ks_sa_rng->reg_rng->status);
struct ks_sa_rng *ks_sa_rng;
ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
if (!ks_sa_rng)
ks_sa_rng->rng = (struct hwrng) {
ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks_sa_rng->reg_rng))
return PTR_ERR(ks_sa_rng->reg_rng);
ks_sa_rng->regmap_cfg =
if (IS_ERR(ks_sa_rng->regmap_cfg))
ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ks_sa_rng->clk))
return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);