arch/loongarch/kernel/traps.c:632: unsigned long era = exception_era(regs);
arch/loongarch/kernel/traps.c:649: if (__get_inst(&insn.word, (u32 *)era, user))
arch/loongarch/kernel/traps.c:723: unsigned long era = exception_era(regs);
arch/loongarch/kernel/traps.c:729: if (__get_inst(&opcode, (u32 *)era, user))
arch/loongarch/kernel/traps.c:860: unsigned int __user *era = (unsigned int __user *)exception_era(regs);
arch/loongarch/kernel/traps.c:872: if (unlikely(get_user(opcode, era) < 0)) {
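In the LoongArch trap handlers above, era is the exception return address (CSR.ERA saved in struct pt_regs), which the handlers use to read back the faulting instruction word. A minimal, hedged sketch of that pattern, using only the helpers visible in the listing; the surrounding error handling and label are assumptions:

	union loongarch_instruction insn;
	unsigned long era = exception_era(regs);	/* CSR.ERA from pt_regs */

	/* Fetch the 32-bit instruction word at the exception return address. */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;	/* hypothetical label: opcode could not be read */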
arch/x86/events/intel/core.c:3840: struct er_account *era;
arch/x86/events/intel/core.c:3853: era = &cpuc->shared_regs->regs[idx];
arch/x86/events/intel/core.c:3858: raw_spin_lock_irqsave(&era->lock, flags);
arch/x86/events/intel/core.c:3860: if (!atomic_read(&era->ref) || era->config == reg->config) {
arch/x86/events/intel/core.c:3886: era->config = reg->config;
arch/x86/events/intel/core.c:3887: era->reg = reg->reg;
arch/x86/events/intel/core.c:3890: atomic_inc(&era->ref);
arch/x86/events/intel/core.c:3900: raw_spin_unlock_irqrestore(&era->lock, flags);
arch/x86/events/intel/core.c:3904: raw_spin_unlock_irqrestore(&era->lock, flags);
arch/x86/events/intel/core.c:3913: struct er_account *era;
arch/x86/events/intel/core.c:3926: era = &cpuc->shared_regs->regs[reg->idx];
arch/x86/events/intel/core.c:3929: atomic_dec(&era->ref);
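Here era is a struct er_account slot in the Intel PMU's shared extra-register table: the constraint code takes era->lock, shares the register when it is unused or already programmed with the same config, and tracks users through era->ref. A rough sketch of that get/put pattern, inferred from the call sites above (cpuc, reg and idx are context from those call sites, not redeclared here):

	struct er_account *era = &cpuc->shared_regs->regs[idx];
	unsigned long flags;

	raw_spin_lock_irqsave(&era->lock, flags);
	if (!atomic_read(&era->ref) || era->config == reg->config) {
		/* free, or already set up the way we need it: share it */
		era->config = reg->config;
		era->reg = reg->reg;
		atomic_inc(&era->ref);
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	/* ...and the put side simply drops the reference: */
	atomic_dec(&era->ref);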
drivers/crypto/caam/caamalg.c:1301: if (ctrlpriv->era < 3)
drivers/crypto/caam/caamalg.c:170: ctrlpriv->era);
drivers/crypto/caam/caamalg.c:189: ctrlpriv->era);
drivers/crypto/caam/caamalg.c:1910: if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
drivers/crypto/caam/caamalg.c:273: false, ctrlpriv->era);
drivers/crypto/caam/caamalg.c:295: nonce, ctx1_iv_off, false, ctrlpriv->era);
drivers/crypto/caam/caamalg.c:319: ctx1_iv_off, false, ctrlpriv->era);
drivers/crypto/caam/caamalg.c:3696: if (priv->era >= 6 && uses_dkp)
drivers/crypto/caam/caamalg.c:3855: if (priv->era < 10) {
drivers/crypto/caam/caamalg.c:613: if (ctrlpriv->era >= 6) {
drivers/crypto/caam/caamalg.c:927: if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
drivers/crypto/caam/caamalg_desc.c:137: unsigned int icvsize, int era)
drivers/crypto/caam/caamalg_desc.c:146: if (era < 6) {
drivers/crypto/caam/caamalg_desc.c:214: const bool is_rfc3686, u32 *nonce, int era)
drivers/crypto/caam/caamalg_desc.c:234: if (era < 6) {
drivers/crypto/caam/caamalg_desc.c:293: int era)
drivers/crypto/caam/caamalg_desc.c:296: init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
drivers/crypto/caam/caamalg_desc.c:322: if (is_qi || era < 3) {
drivers/crypto/caam/caamalg_desc.c:388: const u32 ctx1_iv_off, const bool is_qi, int era)
drivers/crypto/caam/caamalg_desc.c:391: init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
drivers/crypto/caam/caamalg_desc.c:418: if (is_qi || era < 3) {
drivers/crypto/caam/caamalg_desc.c:505: const bool is_qi, int era)
drivers/crypto/caam/caamalg_desc.c:511: init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
drivers/crypto/caam/caamalg_desc.c:561: if (is_qi || era < 3) {
drivers/crypto/caam/caamalg_desc.c:58: unsigned int icvsize, int era)
drivers/crypto/caam/caamalg_desc.c:67: if (era < 6) {
drivers/crypto/caam/caamalg_desc.h:55: unsigned int icvsize, int era);
drivers/crypto/caam/caamalg_desc.h:58: unsigned int icvsize, int era);
drivers/crypto/caam/caamalg_desc.h:64: const bool is_qi, int era);
drivers/crypto/caam/caamalg_desc.h:70: const u32 ctx1_iv_off, const bool is_qi, int era);
drivers/crypto/caam/caamalg_desc.h:76: const bool is_qi, int era);
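In the CAAM shared-descriptor helpers (caamalg_desc.c/.h above) the trailing int era parameter selects between the pre-ERA-6 split-key handling and the newer derived-key path, which is what the era < 6 branches gate. A hedged sketch of that branch as it might appear inside init_sh_desc_key_aead(); the specific descriptor commands and flags are assumptions drawn from the CAAM descriptor-construction API, not the driver's verbatim code:

	if (era < 6) {
		/* pre-ERA-6: load a pre-computed MDHA split key */
		append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
				  adata->keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	} else {
		/* ERA-6 and later: let the Derived Key Protocol split the key */
		append_proto_dkp(desc, adata);
	}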
drivers/crypto/caam/caamalg_qi.c:1436: if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
drivers/crypto/caam/caamalg_qi.c:151: ctx1_iv_off, true, ctrlpriv->era);
drivers/crypto/caam/caamalg_qi.c:167: ctrlpriv->era);
drivers/crypto/caam/caamalg_qi.c:184: ctx1_iv_off, true, ctrlpriv->era);
drivers/crypto/caam/caamalg_qi.c:222: if (ctrlpriv->era >= 6) {
drivers/crypto/caam/caamalg_qi.c:2479: if (priv->era >= 6 && uses_dkp)
drivers/crypto/caam/caamalg_qi.c:2636: if (priv->era < 10) {
drivers/crypto/caam/caamalg_qi.c:754: if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
drivers/crypto/caam/caamalg_qi2.c:1082: if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
drivers/crypto/caam/caamalg_qi2.c:1494: if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
drivers/crypto/caam/caamalg_qi2.c:1544: if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
drivers/crypto/caam/caamalg_qi2.c:245: priv->sec_attr.era);
drivers/crypto/caam/caamalg_qi2.c:249: ctx1_iv_off, true, priv->sec_attr.era);
drivers/crypto/caam/caamalg_qi2.c:271: priv->sec_attr.era);
drivers/crypto/caam/caamalg_qi2.c:3151: ctx->ctx_len, true, priv->sec_attr.era);
drivers/crypto/caam/caamalg_qi2.c:3163: ctx->ctx_len, false, priv->sec_attr.era);
drivers/crypto/caam/caamalg_qi2.c:3175: ctx->ctx_len, true, priv->sec_attr.era);
drivers/crypto/caam/caamalg_qi2.c:3187: ctx->ctx_len, false, priv->sec_attr.era);
drivers/crypto/caam/caamhash.c:1808: if (priv->era >= 6) {
drivers/crypto/caam/caamhash.c:1956: if (priv->era < 10) {
drivers/crypto/caam/caamhash.c:218: ctx->ctx_len, true, ctrlpriv->era);
drivers/crypto/caam/caamhash.c:229: ctx->ctx_len, false, ctrlpriv->era);
drivers/crypto/caam/caamhash.c:239: ctx->ctx_len, true, ctrlpriv->era);
drivers/crypto/caam/caamhash.c:250: ctx->ctx_len, false, ctrlpriv->era);
drivers/crypto/caam/caamhash.c:458: if (ctrlpriv->era >= 6) {
drivers/crypto/caam/caamhash_desc.c:29: int digestsize, int ctx_len, bool import_ctx, int era)
drivers/crypto/caam/caamhash_desc.c:43: if (era < 6)
drivers/crypto/caam/caamhash_desc.h:25: int digestsize, int ctx_len, bool import_ctx, int era);
drivers/crypto/caam/caampkc.c:1180: if (priv->era < 10) {
drivers/crypto/caam/caamprng.c:216: if (priv->era < 10)
drivers/crypto/caam/caamrng.c:275: if (priv->era < 10)
drivers/crypto/caam/ctrl.c:1079: ctrlpriv->era = caam_get_era(perfmon);
drivers/crypto/caam/ctrl.c:1123: if (ctrlpriv->era < 10) {
drivers/crypto/caam/ctrl.c:1148: ctrlpriv->era);
drivers/crypto/caam/ctrl.c:160: if (ctrlpriv->era < 10)
drivers/crypto/caam/ctrl.c:425: u8 era;
drivers/crypto/caam/ctrl.c:442: u8 maj_rev, era;
drivers/crypto/caam/ctrl.c:447: era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
drivers/crypto/caam/ctrl.c:448: if (era) /* This is '0' prior to CAAM ERA-6 */
drivers/crypto/caam/ctrl.c:449: return era;
drivers/crypto/caam/ctrl.c:457: return id[i].era;
drivers/crypto/caam/ctrl.c:657: if (ctrlpriv->era < 10) {
drivers/crypto/caam/dpseci.c:322: attr->era = rsp_params->era;
drivers/crypto/caam/dpseci.h:232: u8 era;
drivers/crypto/caam/dpseci_cmd.h:107: u8 era;
drivers/crypto/caam/intern.h:122: int era; /* CAAM Era (internal HW revision) */
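intern.h documents the field: in the CAAM driver, era is the CAAM era, the internal hardware revision, and the algorithm, hash, PRNG and RNG code gates feature use on it (era >= 6, era <= 8, era < 10 in the branches above). The ctrl.c lines suggest how it is discovered at probe time; a hedged reconstruction of caam_get_era(), where the register names, the id[] table layout and the matcher are assumptions:

static int caam_get_era(struct caam_perfmon __iomem *perfmon)
{
	u32 ccbvid = rd_reg32(&perfmon->ccb_id);
	u8 era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
	u32 id_ms = rd_reg32(&perfmon->caam_id_ms);	/* IP id + major rev */
	int i;

	if (era)		/* this is '0' prior to CAAM ERA-6 */
		return era;

	/* pre-ERA-6 parts: fall back to a static (IP id, revision) table */
	for (i = 0; i < ARRAY_SIZE(id); i++)
		if (id_matches(&id[i], id_ms))	/* hypothetical matcher */
			return id[i].era;

	return -ENOTSUPP;
}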
drivers/md/dm-era-target.c:1141: uint32_t era;
drivers/md/dm-era-target.c:1165: s->era = md->current_era;
drivers/md/dm-era-target.c:1212: static bool block_size_is_power_of_two(struct era *era)
drivers/md/dm-era-target.c:1214: return era->sectors_per_block_shift >= 0;
drivers/md/dm-era-target.c:1217: static dm_block_t get_block(struct era *era, struct bio *bio)
drivers/md/dm-era-target.c:1221: if (!block_size_is_power_of_two(era))
drivers/md/dm-era-target.c:1222: (void) sector_div(block_nr, era->sectors_per_block);
drivers/md/dm-era-target.c:1224: block_nr >>= era->sectors_per_block_shift;
drivers/md/dm-era-target.c:1229: static void remap_to_origin(struct era *era, struct bio *bio)
drivers/md/dm-era-target.c:1231: bio_set_dev(bio, era->origin_dev->bdev);
drivers/md/dm-era-target.c:1239: static void wake_worker(struct era *era)
drivers/md/dm-era-target.c:1241: if (!atomic_read(&era->suspended))
drivers/md/dm-era-target.c:1242: queue_work(era->wq, &era->worker);
drivers/md/dm-era-target.c:1245: static void process_old_eras(struct era *era)
drivers/md/dm-era-target.c:1249: if (!era->digest.step)
drivers/md/dm-era-target.c:1252: r = era->digest.step(era->md, &era->digest);
drivers/md/dm-era-target.c:1255: era->digest.step = NULL;
drivers/md/dm-era-target.c:1257: } else if (era->digest.step)
drivers/md/dm-era-target.c:1258: wake_worker(era);
drivers/md/dm-era-target.c:1261: static void process_deferred_bios(struct era *era)
drivers/md/dm-era-target.c:1269: struct writeset *ws = era->md->current_writeset;
drivers/md/dm-era-target.c:1274: spin_lock(&era->deferred_lock);
drivers/md/dm-era-target.c:1275: bio_list_merge_init(&deferred_bios, &era->deferred_bios);
drivers/md/dm-era-target.c:1276: spin_unlock(&era->deferred_lock);
drivers/md/dm-era-target.c:1282: r = writeset_test_and_set(&era->md->bitset_info, ws,
drivers/md/dm-era-target.c:1283: get_block(era, bio));
drivers/md/dm-era-target.c:1297: r = metadata_commit(era->md);
drivers/md/dm-era-target.c:1313: set_bit(get_block(era, bio), ws->bits);
drivers/md/dm-era-target.c:1320: static void process_rpc_calls(struct era *era)
drivers/md/dm-era-target.c:1328: spin_lock(&era->rpc_lock);
drivers/md/dm-era-target.c:1329: list_splice_init(&era->rpc_calls, &calls);
drivers/md/dm-era-target.c:1330: spin_unlock(&era->rpc_lock);
drivers/md/dm-era-target.c:1333: rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
drivers/md/dm-era-target.c:1338: r = metadata_commit(era->md);
drivers/md/dm-era-target.c:1348: static void kick_off_digest(struct era *era)
drivers/md/dm-era-target.c:1350: if (era->md->archived_writesets) {
drivers/md/dm-era-target.c:1351: era->md->archived_writesets = false;
drivers/md/dm-era-target.c:1352: metadata_digest_start(era->md, &era->digest);
drivers/md/dm-era-target.c:1358: struct era *era = container_of(ws, struct era, worker);
drivers/md/dm-era-target.c:1360: kick_off_digest(era);
drivers/md/dm-era-target.c:1361: process_old_eras(era);
drivers/md/dm-era-target.c:1362: process_deferred_bios(era);
drivers/md/dm-era-target.c:1363: process_rpc_calls(era);
drivers/md/dm-era-target.c:1366: static void defer_bio(struct era *era, struct bio *bio)
drivers/md/dm-era-target.c:1368: spin_lock(&era->deferred_lock);
drivers/md/dm-era-target.c:1369: bio_list_add(&era->deferred_bios, bio);
drivers/md/dm-era-target.c:1370: spin_unlock(&era->deferred_lock);
drivers/md/dm-era-target.c:1372: wake_worker(era);
drivers/md/dm-era-target.c:1378: static int perform_rpc(struct era *era, struct rpc *rpc)
drivers/md/dm-era-target.c:1383: spin_lock(&era->rpc_lock);
drivers/md/dm-era-target.c:1384: list_add(&rpc->list, &era->rpc_calls);
drivers/md/dm-era-target.c:1385: spin_unlock(&era->rpc_lock);
drivers/md/dm-era-target.c:1387: wake_worker(era);
drivers/md/dm-era-target.c:1393: static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md))
drivers/md/dm-era-target.c:1400: return perform_rpc(era, &rpc);
drivers/md/dm-era-target.c:1403: static int in_worker1(struct era *era,
drivers/md/dm-era-target.c:1412: return perform_rpc(era, &rpc);
drivers/md/dm-era-target.c:1415: static void start_worker(struct era *era)
drivers/md/dm-era-target.c:1417: atomic_set(&era->suspended, 0);
drivers/md/dm-era-target.c:1420: static void stop_worker(struct era *era)
drivers/md/dm-era-target.c:1422: atomic_set(&era->suspended, 1);
drivers/md/dm-era-target.c:1423: drain_workqueue(era->wq);
drivers/md/dm-era-target.c:1431: static void era_destroy(struct era *era)
drivers/md/dm-era-target.c:1433: if (era->md)
drivers/md/dm-era-target.c:1434: metadata_close(era->md);
drivers/md/dm-era-target.c:1436: if (era->wq)
drivers/md/dm-era-target.c:1437: destroy_workqueue(era->wq);
drivers/md/dm-era-target.c:1439: if (era->origin_dev)
drivers/md/dm-era-target.c:1440: dm_put_device(era->ti, era->origin_dev);
drivers/md/dm-era-target.c:1442: if (era->metadata_dev)
drivers/md/dm-era-target.c:1443: dm_put_device(era->ti, era->metadata_dev);
drivers/md/dm-era-target.c:1445: kfree(era);
drivers/md/dm-era-target.c:1448: static dm_block_t calc_nr_blocks(struct era *era)
drivers/md/dm-era-target.c:1450: return dm_sector_div_up(era->ti->len, era->sectors_per_block);
drivers/md/dm-era-target.c:1468: struct era *era;
drivers/md/dm-era-target.c:1476: era = kzalloc_obj(*era);
drivers/md/dm-era-target.c:1477: if (!era) {
drivers/md/dm-era-target.c:1482: era->ti = ti;
drivers/md/dm-era-target.c:1485: &era->metadata_dev);
drivers/md/dm-era-target.c:1488: era_destroy(era);
drivers/md/dm-era-target.c:1493: &era->origin_dev);
drivers/md/dm-era-target.c:1496: era_destroy(era);
drivers/md/dm-era-target.c:1500: r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
drivers/md/dm-era-target.c:1503: era_destroy(era);
drivers/md/dm-era-target.c:1507: r = dm_set_target_max_io_len(ti, era->sectors_per_block);
drivers/md/dm-era-target.c:1510: era_destroy(era);
drivers/md/dm-era-target.c:1514: if (!valid_block_size(era->sectors_per_block)) {
drivers/md/dm-era-target.c:1516: era_destroy(era);
drivers/md/dm-era-target.c:1519: if (era->sectors_per_block & (era->sectors_per_block - 1))
drivers/md/dm-era-target.c:1520: era->sectors_per_block_shift = -1;
drivers/md/dm-era-target.c:1522: era->sectors_per_block_shift = __ffs(era->sectors_per_block);
drivers/md/dm-era-target.c:1524: md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
drivers/md/dm-era-target.c:1527: era_destroy(era);
drivers/md/dm-era-target.c:1530: era->md = md;
drivers/md/dm-era-target.c:1532: era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
drivers/md/dm-era-target.c:1533: if (!era->wq) {
drivers/md/dm-era-target.c:1535: era_destroy(era);
drivers/md/dm-era-target.c:1538: INIT_WORK(&era->worker, do_work);
drivers/md/dm-era-target.c:1540: spin_lock_init(&era->deferred_lock);
drivers/md/dm-era-target.c:1541: bio_list_init(&era->deferred_bios);
drivers/md/dm-era-target.c:1543: spin_lock_init(&era->rpc_lock);
drivers/md/dm-era-target.c:1544: INIT_LIST_HEAD(&era->rpc_calls);
drivers/md/dm-era-target.c:1546: ti->private = era;
drivers/md/dm-era-target.c:1562: struct era *era = ti->private;
drivers/md/dm-era-target.c:1563: dm_block_t block = get_block(era, bio);
drivers/md/dm-era-target.c:1570: remap_to_origin(era, bio);
drivers/md/dm-era-target.c:1577: !metadata_current_marked(era->md, block)) {
drivers/md/dm-era-target.c:1578: defer_bio(era, bio);
drivers/md/dm-era-target.c:1588: struct era *era = ti->private;
drivers/md/dm-era-target.c:1590: r = in_worker0(era, metadata_era_archive);
drivers/md/dm-era-target.c:1596: stop_worker(era);
drivers/md/dm-era-target.c:1598: r = metadata_commit(era->md);
drivers/md/dm-era-target.c:1608: struct era *era = ti->private;
drivers/md/dm-era-target.c:1609: dm_block_t new_size = calc_nr_blocks(era);
drivers/md/dm-era-target.c:1611: if (era->nr_blocks != new_size) {
drivers/md/dm-era-target.c:1612: r = metadata_resize(era->md, &new_size);
drivers/md/dm-era-target.c:1618: r = metadata_commit(era->md);
drivers/md/dm-era-target.c:1624: era->nr_blocks = new_size;
drivers/md/dm-era-target.c:1627: start_worker(era);
drivers/md/dm-era-target.c:1629: r = in_worker0(era, metadata_era_rollover);
drivers/md/dm-era-target.c:1648: struct era *era = ti->private;
drivers/md/dm-era-target.c:1655: r = in_worker1(era, metadata_get_stats, &stats);
drivers/md/dm-era-target.c:1663: (unsigned int) stats.era);
drivers/md/dm-era-target.c:1672: format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
drivers/md/dm-era-target.c:1674: format_dev_t(buf, era->origin_dev->bdev->bd_dev);
drivers/md/dm-era-target.c:1675: DMEMIT("%s %u", buf, era->sectors_per_block);
drivers/md/dm-era-target.c:1692: struct era *era = ti->private;
drivers/md/dm-era-target.c:1700: return in_worker0(era, metadata_checkpoint);
drivers/md/dm-era-target.c:1703: return in_worker0(era, metadata_take_snap);
drivers/md/dm-era-target.c:1706: return in_worker0(era, metadata_drop_snap);
drivers/md/dm-era-target.c:1720: struct era *era = ti->private;
drivers/md/dm-era-target.c:1722: return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
drivers/md/dm-era-target.c:1727: struct era *era = ti->private;
drivers/md/dm-era-target.c:1734: if (io_opt_sectors < era->sectors_per_block ||
drivers/md/dm-era-target.c:1735: do_div(io_opt_sectors, era->sectors_per_block)) {
drivers/md/dm-era-target.c:1737: limits->io_opt = era->sectors_per_block << SECTOR_SHIFT;
drivers/md/dm-era-target.c:1757: module_dm(era);
drivers/md/dm-era-target.c:682: uint32_t era;
drivers/md/dm-era-target.c:698: uint64_t key = d->era;
drivers/md/dm-era-target.c:759: d->era = key;
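In dm-era-target.c, era names both the on-disk era counter (the uint32_t era fields and the d->era key above) and the struct era target context that owns the worker, the deferred bio list and the RPC list. The block-mapping helper the map path relies on can be read almost directly off the lines above; a sketch, assuming the usual bio iterator field supplies the starting sector:

static dm_block_t get_block(struct era *era, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	/* Divide for arbitrary block sizes, shift when it is a power of two. */
	if (!block_size_is_power_of_two(era))
		(void) sector_div(block_nr, era->sectors_per_block);
	else
		block_nr >>= era->sectors_per_block_shift;

	return block_nr;
}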