/* rk_rng — Rockchip true random number generator driver (fragmented excerpt) */
/*
 * Low-level register accessors for the RNG block.
 * NOTE(review): only the prototypes are visible in this chunk; the bodies
 * are elided. rk_rng_write_ctl() takes a value and a mask, so it presumably
 * performs a masked read-modify-write (or a write-enable-mask style write)
 * on a control register — confirm against the full file. The writel/readl
 * wrappers look like thin MMIO helpers over rng->base + offset.
 */
static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
/*
 * Prepare and enable all bulk clocks required by the RNG.
 * On failure, logs the error code; the surrounding control flow
 * (the `if (ret)` guard and the return statements) is elided in
 * this chunk — confirm error propagation against the full file.
 */
static int rk_rng_enable_clks(struct rk_rng *rk_rng)
ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
/* Only reached on failure in the full function, presumably. */
dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
/*
 * hwrng .init callback fragment (TRNG_RNG_* register layout variant).
 * Recovers the driver context from the embedded hwrng, enables clocks,
 * programs the noise-source sample count, then starts the engine in
 * 256-bit output mode via the masked control-register helper.
 * The enclosing function signature, error checks, and the remainder of
 * the rk_rng_write_ctl() argument list are elided in this chunk.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
ret = rk_rng_enable_clks(rk_rng);
/* Sample count controls how long the noise source is accumulated. */
writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);
/* Line truncated in this excerpt: flags continue past LEN_256_BIT. */
rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
/*
 * hwrng .cleanup callback fragment (TRNG_RNG_* variant).
 * Clears the entire control register (value 0 under the full mask) to
 * stop the engine, then drops the bulk clocks — the reverse of the
 * matching .init path above. Enclosing signature elided in this chunk.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);
clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
/*
 * hwrng .read callback fragment (TRNG_RNG_* variant).
 * Takes a runtime-PM reference, kicks off a conversion by setting the
 * START bit, polls the control register until completion (poll condition
 * and timeout arguments elided in this chunk), copies the output FIFO
 * into the caller's buffer with memcpy_fromio(), and releases the PM
 * reference with autosuspend. `to_read` is computed in elided code —
 * presumably min(max, FIFO size); confirm against the full file.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
ret = pm_runtime_resume_and_get(rk_rng->dev);
rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);
/* readl_poll_timeout() argument list continues past this line. */
ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
/*
 * hwrng .init callback fragment for a variant that needs no register
 * setup at init time: enabling the bulk clocks is the whole job, so
 * its return value is propagated directly. Enclosing signature elided.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
return rk_rng_enable_clks(rk_rng);
/*
 * hwrng .read callback fragment (RKRNG_* register layout variant).
 * Takes a runtime-PM reference, requests TRNG output via a
 * write-enable-mask style control write (the request bit is mirrored
 * into the upper 16 bits as the enable mask), polls RKRNG_STATE for the
 * ready flag (poll condition/timeout elided in this chunk), acknowledges
 * the ready bit by writing it back, then copies the data registers into
 * the caller's buffer. PM reference is dropped with autosuspend.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
ret = pm_runtime_resume_and_get(rk_rng->dev);
/* (bit | bit << 16): hardware write-mask idiom — upper half enables the write. */
rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
dev_err(rk_rng->dev, "timed out waiting for data\n");
/* Write-1-to-clear acknowledgment of the ready status. */
rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);
memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
/*
 * hwrng .init callback fragment (TRNG v1 IP).
 * Enables clocks, reads and (in elided code, presumably) validates the
 * IP version register, waits for the initial hardware reseed to finish
 * by polling TRNG_V1_STAT, clears any latched interrupt status
 * (write-1-to-clear via ISTAT readback), and programs the automatic
 * reseed interval. The division by 16 suggests TRNG_V1_AUTO_RQSTS
 * counts in units of 16 requests — confirm against the TRM.
 * Error paths and the `goto`/cleanup structure are elided; the final
 * clk_bulk_disable_unprepare() here is presumably the error-unwind
 * path, since .init normally leaves clocks running.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
ret = rk_rng_enable_clks(rk_rng);
version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
dev_err(rk_rng->dev,
if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
/* Clear stale interrupt status: ISTAT bits are write-1-to-clear. */
istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);
rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);
clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
/*
 * hwrng .cleanup callback fragment (TRNG v1 IP): just drops the bulk
 * clocks taken by the matching .init. Enclosing signature elided.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
/*
 * hwrng .read callback fragment (TRNG v1 IP).
 * Sequence visible here: take a runtime-PM reference; clear stale ISTAT
 * bits (write-1-to-clear readback); select 256-bit mode; issue the RAND
 * command; poll ISTAT atomically for completion (poll condition/timeout
 * elided); copy the random words from TRNG_V1_RAND0; acknowledge the
 * completion bit in ISTAT; park the engine with a NOP command; drop the
 * PM reference with autosuspend. The atomic poll variant suggests this
 * path may run with IRQs off or in non-sleepable context — confirm
 * against the full file.
 */
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
ret = pm_runtime_resume_and_get(rk_rng->dev);
/* Clear any latched interrupt status before starting a new request. */
reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);
ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);
/* Acknowledge the completion status we just polled for. */
rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
/* Return the engine to idle so it stops generating. */
rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
/*
 * Platform-driver probe fragment.
 * Allocates the device context (devm-managed), resolves the per-SoC
 * match data, maps the MMIO region, fetches all clocks as a bulk set,
 * optionally handles a reset (body of that branch elided), stashes the
 * context in drvdata, wires the soc_data callbacks into the hwrng ops,
 * and registers the hwrng (devm-managed, so no explicit unregister is
 * needed on remove). Error-return statements and several intermediate
 * lines are elided in this chunk.
 */
struct rk_rng *rk_rng;
rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
if (!rk_rng)
/* Per-SoC register layout / callbacks, selected by OF compatible. */
rk_rng->soc_data = of_device_get_match_data(dev);
rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rk_rng->base))
return PTR_ERR(rk_rng->base);
/* devm_clk_bulk_get_all() returns the clock count, or a negative errno. */
rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
if (rk_rng->clk_num < 0)
return dev_err_probe(dev, rk_rng->clk_num,
/* Reset handling branch: body elided in this chunk. */
if (rk_rng->soc_data->reset_optional)
platform_set_drvdata(pdev, rk_rng);
rk_rng->rng.name = dev_driver_string(dev);
/* Dispatch the hwrng ops through the per-SoC callback table. */
rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
rk_rng->dev = dev;
rk_rng->rng.quality = rk_rng->soc_data->quality;
ret = devm_hwrng_register(dev, &rk_rng->rng);
/*
 * Runtime-PM suspend fragment: reuses the per-SoC hwrng cleanup
 * callback to quiesce the engine and drop clocks. Signature elided.
 */
struct rk_rng *rk_rng = dev_get_drvdata(dev);
rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);
/*
 * Runtime-PM resume fragment: reuses the per-SoC hwrng init callback
 * to re-enable clocks and reprogram the engine. Signature elided.
 */
struct rk_rng *rk_rng = dev_get_drvdata(dev);
return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);