drivers/gpu/drm/mediatek/mtk_drm_drv.c:922: struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node;
drivers/gpu/drm/mediatek/mtk_drm_drv.c:930: ret = mtk_drm_of_get_ddp_ep_cid(vdo, 0, cpath, &next, &temp_path[idx]);
drivers/md/dm-vdo/action-manager.c:106: vdo_action_scheduler_fn scheduler, struct vdo *vdo,
drivers/md/dm-vdo/action-manager.c:128: vdo_initialize_completion(&manager->completion, vdo, VDO_ACTION_COMPLETION);
drivers/md/dm-vdo/action-manager.h:83: struct vdo *vdo,
drivers/md/dm-vdo/block-map.c:1128: if (vdo_is_read_only(info->cache->vdo)) {
drivers/md/dm-vdo/block-map.c:1242: vdo_initialize_completion(completion, cache->vdo, VDO_PAGE_COMPLETION);
drivers/md/dm-vdo/block-map.c:1247: if (page_completion->writable && vdo_is_read_only(cache->vdo)) {
drivers/md/dm-vdo/block-map.c:1777: static bool __must_check is_invalid_tree_entry(const struct vdo *vdo,
drivers/md/dm-vdo/block-map.c:1790: return !vdo_is_physical_data_block(vdo->depot, mapping->pbn);
drivers/md/dm-vdo/block-map.c:193: result = create_metadata_vio(cache->vdo, VIO_TYPE_BLOCK_MAP,
drivers/md/dm-vdo/block-map.c:2178: vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
drivers/md/dm-vdo/block-map.c:2752: struct vdo *vdo = map->vdo;
drivers/md/dm-vdo/block-map.c:2758: zone->thread_id = vdo->thread_config.logical_threads[zone_number];
drivers/md/dm-vdo/block-map.c:2780: result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE, 1,
drivers/md/dm-vdo/block-map.c:2789: zone->page_cache.vdo = vdo;
drivers/md/dm-vdo/block-map.c:2888: struct vdo *vdo, struct recovery_journal *journal,
drivers/md/dm-vdo/block-map.c:2904: vdo->thread_config.logical_zone_count,
drivers/md/dm-vdo/block-map.c:2909: map->vdo = vdo;
drivers/md/dm-vdo/block-map.c:2924: map->zone_count = vdo->thread_config.logical_zone_count;
drivers/md/dm-vdo/block-map.c:2935: map, schedule_era_advance, vdo,
drivers/md/dm-vdo/block-map.c:575: struct vdo *vdo = cache->vdo;
drivers/md/dm-vdo/block-map.c:577: if ((result != VDO_READ_ONLY) && !vdo_is_read_only(vdo)) {
drivers/md/dm-vdo/block-map.c:580: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/block-map.c:646: (vdo_is_read_only(zone->block_map->vdo) ?
drivers/md/dm-vdo/block-map.c:653: vdo_enter_read_only_mode(zone->block_map->vdo, result);
drivers/md/dm-vdo/block-map.c:688: vdo_enter_read_only_mode(cache->zone->block_map->vdo, result);
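
The pattern repeated through block-map.c above (and across most of this listing) is VDO's read-only convention: a failure that is not itself VDO_READ_ONLY pushes the whole device into read-only mode, and the guard avoids re-entering it. A minimal sketch of the idiom assembled from block-map.c:575-580 and 646-653; the helper name is hypothetical:

static void enter_zone_read_only_mode(struct block_map_zone *zone, int result)
{
	struct vdo *vdo = zone->block_map->vdo;

	/* A VDO_READ_ONLY result means the device is already read-only. */
	if ((result != VDO_READ_ONLY) && !vdo_is_read_only(vdo))
		vdo_enter_read_only_mode(vdo, result);
}
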
drivers/md/dm-vdo/block-map.h:255: struct vdo *vdo;
drivers/md/dm-vdo/block-map.h:334: block_count_t logical_blocks, struct vdo *vdo,
drivers/md/dm-vdo/block-map.h:51: struct vdo *vdo;
drivers/md/dm-vdo/completion.c:114: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/completion.c:117: if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
drivers/md/dm-vdo/completion.c:120: vdo->thread_config.thread_count) != VDO_SUCCESS)
drivers/md/dm-vdo/completion.c:126: vdo_enqueue_work_queue(vdo->threads[thread_id].queue, completion);
drivers/md/dm-vdo/completion.c:52: struct vdo *vdo,
drivers/md/dm-vdo/completion.c:56: completion->vdo = vdo;
drivers/md/dm-vdo/completion.h:31: void vdo_initialize_completion(struct vdo_completion *completion, struct vdo *vdo,
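
The completion.c hits outline the completion lifecycle: vdo_initialize_completion() stores the owning vdo in the completion (completion.c:52-56), and the enqueue path checks the target thread against thread_config before handing the completion to that thread's work queue (completion.c:114-126). A sketch of that enqueue step, assuming only the calls shown above; the function name and assertion message are hypothetical:

static void launch_on_thread(struct vdo_completion *completion, thread_id_t thread_id)
{
	struct vdo *vdo = completion->vdo;	/* stored by vdo_initialize_completion() */

	/* Refuse thread ids outside the configured set. */
	if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
		       "thread_id is in range") != VDO_SUCCESS)
		return;

	vdo_enqueue_work_queue(vdo->threads[thread_id].queue, completion);
}
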
drivers/md/dm-vdo/data-vio.c:1001: VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
drivers/md/dm-vdo/data-vio.c:1012: assert_on_vdo_cpu_thread(completion->vdo, __func__);
drivers/md/dm-vdo/data-vio.c:1024: assert_on_vdo_cpu_thread(completion->vdo, __func__);
drivers/md/dm-vdo/data-vio.c:1263: struct data_vio_pool *pool = completion->vdo->data_vio_pool;
drivers/md/dm-vdo/data-vio.c:1283: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/data-vio.c:1303: (READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) &&
drivers/md/dm-vdo/data-vio.c:1329: if (vdo_is_read_only(completion->vdo))
drivers/md/dm-vdo/data-vio.c:1344: vdo_enter_read_only_mode(completion->vdo, completion->result);
drivers/md/dm-vdo/data-vio.c:1693: vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
drivers/md/dm-vdo/data-vio.c:1920: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/data-vio.c:1922: VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
drivers/md/dm-vdo/data-vio.c:1923: (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
drivers/md/dm-vdo/data-vio.c:246: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/data-vio.c:254: lock->zone = &vdo->logical_zones->zones[zone_number];
drivers/md/dm-vdo/data-vio.c:261: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/data-vio.c:263: if (vdo_is_read_only(vdo)) {
drivers/md/dm-vdo/data-vio.c:275: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/data-vio.c:287: vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
drivers/md/dm-vdo/data-vio.c:289: vdo_count_bios(&vdo->stats.bios_acknowledged_partial, bio);
drivers/md/dm-vdo/data-vio.c:426: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/data-vio.c:432: if (data_vio->logical.lbn >= vdo->states.vdo.config.logical_blocks) {
drivers/md/dm-vdo/data-vio.c:528: static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio)
drivers/md/dm-vdo/data-vio.c:552: vdo_count_bios(&vdo->stats.bios_in_partial, bio);
drivers/md/dm-vdo/data-vio.c:556: vdo_count_bios(&vdo->stats.bios_in_partial, bio);
drivers/md/dm-vdo/data-vio.c:575: lbn = (bio->bi_iter.bi_sector - vdo->starting_sector_offset) / VDO_SECTORS_PER_BLOCK;
drivers/md/dm-vdo/data-vio.c:583: launch_bio(limiter->pool->completion.vdo, data_vio, bio);
drivers/md/dm-vdo/data-vio.c:783: static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)
drivers/md/dm-vdo/data-vio.c:813: vdo_initialize_completion(&data_vio->decrement_completion, vdo,
drivers/md/dm-vdo/data-vio.c:815: initialize_vio(&data_vio->vio, bio, 1, VIO_TYPE_DATA, VIO_PRIORITY_DATA, vdo);
drivers/md/dm-vdo/data-vio.c:838: int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
drivers/md/dm-vdo/data-vio.c:860: vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
drivers/md/dm-vdo/data-vio.c:862: process_release_callback, vdo->thread_config.cpu_thread,
drivers/md/dm-vdo/data-vio.c:874: result = initialize_data_vio(data_vio, vdo);
drivers/md/dm-vdo/data-vio.c:982: launch_bio(pool->completion.vdo, data_vio, bio);
drivers/md/dm-vdo/data-vio.c:999: static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
drivers/md/dm-vdo/data-vio.h:313: static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
drivers/md/dm-vdo/data-vio.h:315: return data_vio->vio.completion.vdo;
drivers/md/dm-vdo/data-vio.h:331: int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
drivers/md/dm-vdo/data-vio.h:637: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/data-vio.h:639: if (!vdo_uses_bio_ack_queue(vdo)) {
drivers/md/dm-vdo/data-vio.h:645: vdo->thread_config.bio_ack_thread);
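
data-vio.h:313-315 shows how a data_vio reaches its owning vdo: through the completion embedded in its vio. The bio-ack check at data-vio.h:637-645 is conditional because a dedicated bio-ack queue is optional. A sketch of that check under those assumptions; the helper name and assertion message are hypothetical:

static inline void assert_in_bio_ack_zone(struct vdo_completion *completion)
{
	struct vdo *vdo = completion->vdo;

	/* No dedicated bio-ack thread configured; nothing to assert. */
	if (!vdo_uses_bio_ack_queue(vdo))
		return;

	VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread,
			    "completion is on the bio-ack thread");
}
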
drivers/md/dm-vdo/dedupe.c:1605: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/dedupe.c:1629: atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
drivers/md/dm-vdo/dedupe.c:1633: result = vdo_get_physical_zone(vdo, advice->pbn, &advice->zone);
drivers/md/dm-vdo/dedupe.c:1638: atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
drivers/md/dm-vdo/dedupe.c:2169: static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
drivers/md/dm-vdo/dedupe.c:2173: struct volume_geometry geometry = vdo->geometry;
drivers/md/dm-vdo/dedupe.c:2194: .bdev = vdo->device_config->owned_device->bdev,
drivers/md/dm-vdo/dedupe.c:2206: result = vdo_make_thread(vdo, vdo->thread_config.dedupe_thread, &uds_queue_type,
drivers/md/dm-vdo/dedupe.c:2214: vdo_initialize_completion(&zones->completion, vdo, VDO_HASH_ZONES_COMPLETION);
drivers/md/dm-vdo/dedupe.c:2216: vdo->thread_config.dedupe_thread);
drivers/md/dm-vdo/dedupe.c:2335: report_dedupe_timeouts(completion->vdo->hash_zones, timed_out);
drivers/md/dm-vdo/dedupe.c:2349: static int __must_check initialize_zone(struct vdo *vdo, struct hash_zones *zones,
drivers/md/dm-vdo/dedupe.c:2362: zone->thread_id = vdo->thread_config.hash_zone_threads[zone_number];
drivers/md/dm-vdo/dedupe.c:2363: vdo_initialize_completion(&zone->completion, vdo, VDO_HASH_ZONE_COMPLETION);
drivers/md/dm-vdo/dedupe.c:2392: return vdo_make_default_thread(vdo, zone->thread_id);
drivers/md/dm-vdo/dedupe.c:2411: int vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr)
drivers/md/dm-vdo/dedupe.c:2416: zone_count_t zone_count = vdo->thread_config.hash_zone_count;
drivers/md/dm-vdo/dedupe.c:2426: result = initialize_index(vdo, zones);
drivers/md/dm-vdo/dedupe.c:2436: result = initialize_zone(vdo, zones, z);
drivers/md/dm-vdo/dedupe.c:2444: vdo->thread_config.admin_thread, zones, NULL,
drivers/md/dm-vdo/dedupe.c:2445: vdo, &zones->manager);
drivers/md/dm-vdo/dedupe.c:2548: vdo_schedule_operation(zones->manager, parent->vdo->suspend_type, suspend_index,
drivers/md/dm-vdo/dedupe.c:2579: struct device_config *config = parent->vdo->device_config;
drivers/md/dm-vdo/dedupe.c:2619: if (vdo_is_read_only(parent->vdo)) {
drivers/md/dm-vdo/dedupe.c:2904: struct vdo *vdo = vdo_from_data_vio(data_vio);
drivers/md/dm-vdo/dedupe.c:2909: if (!READ_ONCE(vdo->hash_zones->dedupe_flag)) {
drivers/md/dm-vdo/dedupe.c:2916: atomic64_inc(&vdo->hash_zones->dedupe_context_busy);
drivers/md/dm-vdo/dedupe.h:81: int __must_check vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr);
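
The constructor at dedupe.c:2411-2445 shows the shape shared by every zone-set constructor in this listing (hash, logical, and physical zones): size the set from thread_config, initialize each zone, then attach a manager on the admin thread. A condensed sketch under those assumptions; vdo_allocate_extended() and the omission of error cleanup and the action-manager call are simplifications, not the exact dedupe.c body:

int vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr)
{
	zone_count_t z, zone_count = vdo->thread_config.hash_zone_count;
	struct hash_zones *zones;
	int result;

	result = vdo_allocate_extended(struct hash_zones, zone_count,
				       struct hash_zone, __func__, &zones);
	if (result != VDO_SUCCESS)
		return result;

	result = initialize_index(vdo, zones);		/* dedupe.c:2426 */
	if (result != VDO_SUCCESS)
		return result;

	for (z = 0; z < zone_count; z++) {
		result = initialize_zone(vdo, zones, z);	/* dedupe.c:2436 */
		if (result != VDO_SUCCESS)
			return result;
	}

	/* dedupe.c:2444-2445 then creates an action manager on the admin thread. */
	*zones_ptr = zones;
	return VDO_SUCCESS;
}
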
drivers/md/dm-vdo/dm-vdo-target.c:1012: static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1014: return bdev_nr_bytes(vdo_get_backing_device(vdo)) / VDO_BLOCK_SIZE;
drivers/md/dm-vdo/dm-vdo-target.c:1017: static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc,
drivers/md/dm-vdo/dm-vdo-target.c:1022: vdo_set_compressing(vdo, true);
drivers/md/dm-vdo/dm-vdo-target.c:1027: vdo_set_compressing(vdo, false);
drivers/md/dm-vdo/dm-vdo-target.c:1045: static int __must_check process_vdo_message(struct vdo *vdo, unsigned int argc,
drivers/md/dm-vdo/dm-vdo-target.c:1058: return vdo_dump(vdo, argc, argv, "dmsetup message");
drivers/md/dm-vdo/dm-vdo-target.c:1062: vdo->dump_on_shutdown = true;
drivers/md/dm-vdo/dm-vdo-target.c:1071: return vdo_message_dedupe_index(vdo->hash_zones, argv[0]);
drivers/md/dm-vdo/dm-vdo-target.c:1074: if (atomic_cmpxchg(&vdo->processing_message, 0, 1) != 0)
drivers/md/dm-vdo/dm-vdo-target.c:1077: result = process_vdo_message_locked(vdo, argc, argv);
drivers/md/dm-vdo/dm-vdo-target.c:1081: atomic_set(&vdo->processing_message, 0);
drivers/md/dm-vdo/dm-vdo-target.c:1089: struct vdo *vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1097: vdo = get_vdo_for_target(ti);
drivers/md/dm-vdo/dm-vdo-target.c:1099: vdo_register_thread_device_id(&instance_thread, &vdo->instance);
drivers/md/dm-vdo/dm-vdo-target.c:1106: vdo_write_stats(vdo, result_buffer, maxlen);
drivers/md/dm-vdo/dm-vdo-target.c:1109: vdo_write_config(vdo, &result_buffer, &maxlen);
drivers/md/dm-vdo/dm-vdo-target.c:1112: result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv));
drivers/md/dm-vdo/dm-vdo-target.c:1137: static bool vdo_uses_device(struct vdo *vdo, const void *context)
drivers/md/dm-vdo/dm-vdo-target.c:1141: return vdo_get_backing_device(vdo)->bd_dev == config->owned_device->bdev->bd_dev;
drivers/md/dm-vdo/dm-vdo-target.c:1149: static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1151: switch (vdo->admin.phase) {
drivers/md/dm-vdo/dm-vdo-target.c:1156: return vdo->thread_config.packer_thread;
drivers/md/dm-vdo/dm-vdo-target.c:1160: return vdo->thread_config.cpu_thread;
drivers/md/dm-vdo/dm-vdo-target.c:1165: return vdo->thread_config.journal_thread;
drivers/md/dm-vdo/dm-vdo-target.c:1168: return vdo->thread_config.admin_thread;
drivers/md/dm-vdo/dm-vdo-target.c:1172: static struct vdo_completion *prepare_admin_completion(struct vdo *vdo,
drivers/md/dm-vdo/dm-vdo-target.c:1176: struct vdo_completion *completion = &vdo->admin.completion;
drivers/md/dm-vdo/dm-vdo-target.c:1184: completion->callback_thread_id = get_thread_id_for_phase(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1196: static u32 advance_phase(struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1198: u32 phase = vdo->admin.phase++;
drivers/md/dm-vdo/dm-vdo-target.c:1200: vdo->admin.completion.callback_thread_id = get_thread_id_for_phase(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1201: vdo->admin.completion.requeue = true;
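
The hits at dm-vdo-target.c:1198-1201 are enough to reconstruct advance_phase() nearly verbatim: every admin operation is a phase machine whose completion hops to whichever thread owns the next phase, per get_thread_id_for_phase() above. Reassembled from those three lines:

static u32 advance_phase(struct vdo *vdo)
{
	/* Return the current phase and step to the next one. */
	u32 phase = vdo->admin.phase++;

	/* Retarget the admin completion at the thread owning the new phase. */
	vdo->admin.completion.callback_thread_id = get_thread_id_for_phase(vdo);
	vdo->admin.completion.requeue = true;
	return phase;
}
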
drivers/md/dm-vdo/dm-vdo-target.c:1209: static int perform_admin_operation(struct vdo *vdo, u32 starting_phase,
drivers/md/dm-vdo/dm-vdo-target.c:1214: struct vdo_administrator *admin = &vdo->admin;
drivers/md/dm-vdo/dm-vdo-target.c:1225: vdo_launch_completion(prepare_admin_completion(vdo, callback, error_handler));
drivers/md/dm-vdo/dm-vdo-target.c:1244: static void assert_admin_phase_thread(struct vdo *vdo, const char *what)
drivers/md/dm-vdo/dm-vdo-target.c:1246: VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
drivers/md/dm-vdo/dm-vdo-target.c:1248: ADMIN_PHASE_NAMES[vdo->admin.phase]);
drivers/md/dm-vdo/dm-vdo-target.c:1257: struct vdo_administrator *admin = &completion->vdo->admin;
drivers/md/dm-vdo/dm-vdo-target.c:1273: static int __must_check decode_from_super_block(struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1275: const struct device_config *config = vdo->device_config;
drivers/md/dm-vdo/dm-vdo-target.c:1278: result = vdo_decode_component_states(vdo->super_block.buffer, &vdo->geometry,
drivers/md/dm-vdo/dm-vdo-target.c:1279: &vdo->states);
drivers/md/dm-vdo/dm-vdo-target.c:1283: vdo_set_state(vdo, vdo->states.vdo.state);
drivers/md/dm-vdo/dm-vdo-target.c:1284: vdo->load_state = vdo->states.vdo.state;
drivers/md/dm-vdo/dm-vdo-target.c:1290: if (vdo->states.vdo.config.logical_blocks < config->logical_blocks) {
drivers/md/dm-vdo/dm-vdo-target.c:1293: (unsigned long long) vdo->states.vdo.config.logical_blocks);
drivers/md/dm-vdo/dm-vdo-target.c:1294: vdo->states.vdo.config.logical_blocks = config->logical_blocks;
drivers/md/dm-vdo/dm-vdo-target.c:1297: result = vdo_validate_component_states(&vdo->states, vdo->geometry.nonce,
drivers/md/dm-vdo/dm-vdo-target.c:1303: vdo->layout = vdo->states.layout;
drivers/md/dm-vdo/dm-vdo-target.c:1318: static int __must_check decode_vdo(struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1324: result = decode_from_super_block(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1326: vdo_destroy_component_states(&vdo->states);
drivers/md/dm-vdo/dm-vdo-target.c:1330: maximum_age = vdo_convert_maximum_age(vdo->device_config->block_map_maximum_age);
drivers/md/dm-vdo/dm-vdo-target.c:1332: vdo_get_recovery_journal_length(vdo->states.vdo.config.recovery_journal_size);
drivers/md/dm-vdo/dm-vdo-target.c:1345: result = vdo_enable_read_only_entry(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1349: partition = vdo_get_known_partition(&vdo->layout,
drivers/md/dm-vdo/dm-vdo-target.c:1351: result = vdo_decode_recovery_journal(vdo->states.recovery_journal,
drivers/md/dm-vdo/dm-vdo-target.c:1352: vdo->states.vdo.nonce, vdo, partition,
drivers/md/dm-vdo/dm-vdo-target.c:1353: vdo->states.vdo.complete_recoveries,
drivers/md/dm-vdo/dm-vdo-target.c:1354: vdo->states.vdo.config.recovery_journal_size,
drivers/md/dm-vdo/dm-vdo-target.c:1355: &vdo->recovery_journal);
drivers/md/dm-vdo/dm-vdo-target.c:1359: partition = vdo_get_known_partition(&vdo->layout, VDO_SLAB_SUMMARY_PARTITION);
drivers/md/dm-vdo/dm-vdo-target.c:1360: result = vdo_decode_slab_depot(vdo->states.slab_depot, vdo, partition,
drivers/md/dm-vdo/dm-vdo-target.c:1361: &vdo->depot);
drivers/md/dm-vdo/dm-vdo-target.c:1365: result = vdo_decode_block_map(vdo->states.block_map,
drivers/md/dm-vdo/dm-vdo-target.c:1366: vdo->states.vdo.config.logical_blocks, vdo,
drivers/md/dm-vdo/dm-vdo-target.c:1367: vdo->recovery_journal, vdo->states.vdo.nonce,
drivers/md/dm-vdo/dm-vdo-target.c:1368: vdo->device_config->cache_size, maximum_age,
drivers/md/dm-vdo/dm-vdo-target.c:1369: &vdo->block_map);
drivers/md/dm-vdo/dm-vdo-target.c:1373: result = vdo_make_physical_zones(vdo, &vdo->physical_zones);
drivers/md/dm-vdo/dm-vdo-target.c:1378: result = vdo_make_logical_zones(vdo, &vdo->logical_zones);
drivers/md/dm-vdo/dm-vdo-target.c:1382: return vdo_make_hash_zones(vdo, &vdo->hash_zones);
drivers/md/dm-vdo/dm-vdo-target.c:1391: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1394: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:1396: switch (advance_phase(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:1398: result = vdo_start_operation(&vdo->admin.state,
drivers/md/dm-vdo/dm-vdo-target.c:1405: vdo_load_super_block(vdo, completion);
drivers/md/dm-vdo/dm-vdo-target.c:1409: vdo_continue_completion(completion, decode_vdo(vdo));
drivers/md/dm-vdo/dm-vdo-target.c:1438: static void set_device_config(struct dm_target *ti, struct vdo *vdo,
drivers/md/dm-vdo/dm-vdo-target.c:1442: list_add_tail(&config->config_list, &vdo->device_config_list);
drivers/md/dm-vdo/dm-vdo-target.c:1443: config->vdo = vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1451: struct vdo *vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1467: vdo = vdo_find_matching(vdo_uses_device, config);
drivers/md/dm-vdo/dm-vdo-target.c:1468: if (vdo != NULL) {
drivers/md/dm-vdo/dm-vdo-target.c:1470: vdo->device_config->parent_device_name);
drivers/md/dm-vdo/dm-vdo-target.c:1475: result = vdo_make(instance, config, &ti->error, &vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1479: vdo_destroy(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1483: result = perform_admin_operation(vdo, PRE_LOAD_PHASE_START, pre_load_callback,
drivers/md/dm-vdo/dm-vdo-target.c:1491: vdo_destroy(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1495: set_device_config(ti, vdo, config);
drivers/md/dm-vdo/dm-vdo-target.c:1496: vdo->device_config = config;
drivers/md/dm-vdo/dm-vdo-target.c:1501: static bool __must_check vdo_is_named(struct vdo *vdo, const void *context)
drivers/md/dm-vdo/dm-vdo-target.c:1503: struct dm_target *ti = vdo->device_config->owning_target;
drivers/md/dm-vdo/dm-vdo-target.c:1641: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1643: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:1646: if (vdo_is_read_only(vdo))
drivers/md/dm-vdo/dm-vdo-target.c:1649: if (vdo_in_recovery_mode(vdo))
drivers/md/dm-vdo/dm-vdo-target.c:1668: static int grow_layout(struct vdo *vdo, block_count_t old_size, block_count_t new_size)
drivers/md/dm-vdo/dm-vdo-target.c:1673: if (vdo->next_layout.size == new_size) {
drivers/md/dm-vdo/dm-vdo-target.c:1679: if (vdo->partition_copier == NULL) {
drivers/md/dm-vdo/dm-vdo-target.c:1680: vdo->partition_copier = dm_kcopyd_client_create(NULL);
drivers/md/dm-vdo/dm-vdo-target.c:1681: if (IS_ERR(vdo->partition_copier)) {
drivers/md/dm-vdo/dm-vdo-target.c:1682: result = PTR_ERR(vdo->partition_copier);
drivers/md/dm-vdo/dm-vdo-target.c:1683: vdo->partition_copier = NULL;
drivers/md/dm-vdo/dm-vdo-target.c:1689: vdo_uninitialize_layout(&vdo->next_layout);
drivers/md/dm-vdo/dm-vdo-target.c:1695: result = vdo_initialize_layout(new_size, vdo->layout.start,
drivers/md/dm-vdo/dm-vdo-target.c:1696: get_partition_size(&vdo->layout,
drivers/md/dm-vdo/dm-vdo-target.c:1698: get_partition_size(&vdo->layout,
drivers/md/dm-vdo/dm-vdo-target.c:1700: get_partition_size(&vdo->layout,
drivers/md/dm-vdo/dm-vdo-target.c:1702: &vdo->next_layout);
drivers/md/dm-vdo/dm-vdo-target.c:1704: dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
drivers/md/dm-vdo/dm-vdo-target.c:1710: get_partition_size(&vdo->next_layout,
drivers/md/dm-vdo/dm-vdo-target.c:1712: get_partition_size(&vdo->next_layout,
drivers/md/dm-vdo/dm-vdo-target.c:1716: vdo_uninitialize_layout(&vdo->next_layout);
drivers/md/dm-vdo/dm-vdo-target.c:1717: dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
drivers/md/dm-vdo/dm-vdo-target.c:1724: static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks)
drivers/md/dm-vdo/dm-vdo-target.c:1727: block_count_t current_physical_blocks = vdo->states.vdo.config.physical_blocks;
drivers/md/dm-vdo/dm-vdo-target.c:1733: result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START,
drivers/md/dm-vdo/dm-vdo-target.c:1740: result = grow_layout(vdo, current_physical_blocks, new_physical_blocks);
drivers/md/dm-vdo/dm-vdo-target.c:1744: result = vdo_prepare_to_grow_slab_depot(vdo->depot,
drivers/md/dm-vdo/dm-vdo-target.c:1745: vdo_get_known_partition(&vdo->next_layout,
drivers/md/dm-vdo/dm-vdo-target.c:1748: vdo_uninitialize_layout(&vdo->next_layout);
drivers/md/dm-vdo/dm-vdo-target.c:1816: struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1819: bool may_grow = (vdo_get_admin_state(vdo) != VDO_ADMIN_STATE_PRE_LOADED);
drivers/md/dm-vdo/dm-vdo-target.c:1821: result = validate_new_device_config(config, vdo->device_config, may_grow,
drivers/md/dm-vdo/dm-vdo-target.c:1826: if (config->logical_blocks > vdo->device_config->logical_blocks) {
drivers/md/dm-vdo/dm-vdo-target.c:1827: block_count_t logical_blocks = vdo->states.vdo.config.logical_blocks;
drivers/md/dm-vdo/dm-vdo-target.c:1834: result = vdo_prepare_to_grow_block_map(vdo->block_map,
drivers/md/dm-vdo/dm-vdo-target.c:1844: if (config->physical_blocks > vdo->device_config->physical_blocks) {
drivers/md/dm-vdo/dm-vdo-target.c:1845: result = prepare_to_grow_physical(vdo, config->physical_blocks);
drivers/md/dm-vdo/dm-vdo-target.c:1864: if (strcmp(config->parent_device_name, vdo->device_config->parent_device_name) != 0) {
drivers/md/dm-vdo/dm-vdo-target.c:1868: vdo->device_config->parent_device_name,
drivers/md/dm-vdo/dm-vdo-target.c:1876: unsigned int argc, char **argv, struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:1886: result = prepare_to_modify(ti, config, vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1892: set_device_config(ti, vdo, config);
drivers/md/dm-vdo/dm-vdo-target.c:1901: struct vdo *vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1905: vdo = vdo_find_matching(vdo_is_named, device_name);
drivers/md/dm-vdo/dm-vdo-target.c:1906: if (vdo == NULL) {
drivers/md/dm-vdo/dm-vdo-target.c:1909: vdo_register_thread_device_id(&instance_thread, &vdo->instance);
drivers/md/dm-vdo/dm-vdo-target.c:1910: result = update_existing_vdo(device_name, ti, argc, argv, vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1921: struct vdo *vdo = vdo_forget(config->vdo);
drivers/md/dm-vdo/dm-vdo-target.c:1924: if (list_empty(&vdo->device_config_list)) {
drivers/md/dm-vdo/dm-vdo-target.c:1928: unsigned int instance = vdo->instance;
drivers/md/dm-vdo/dm-vdo-target.c:1936: if (vdo->dump_on_shutdown)
drivers/md/dm-vdo/dm-vdo-target.c:1937: vdo_dump_all(vdo, "device shutdown");
drivers/md/dm-vdo/dm-vdo-target.c:1939: vdo_destroy(vdo_forget(vdo));
drivers/md/dm-vdo/dm-vdo-target.c:1944: } else if (config == vdo->device_config) {
drivers/md/dm-vdo/dm-vdo-target.c:1949: vdo->device_config = list_first_entry(&vdo->device_config_list,
drivers/md/dm-vdo/dm-vdo-target.c:1969: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:1971: switch (vdo_get_state(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:1974: vdo_set_state(vdo, VDO_CLEAN);
drivers/md/dm-vdo/dm-vdo-target.c:1990: vdo_save_components(vdo, completion);
drivers/md/dm-vdo/dm-vdo-target.c:1999: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2000: struct admin_state *state = &vdo->admin.state;
drivers/md/dm-vdo/dm-vdo-target.c:2003: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:2005: switch (advance_phase(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2013: vdo_start_operation(state, vdo->suspend_type));
drivers/md/dm-vdo/dm-vdo-target.c:2023: if (vdo_in_read_only_mode(vdo))
drivers/md/dm-vdo/dm-vdo-target.c:2026: vdo_drain_packer(vdo->packer, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2030: drain_data_vio_pool(vdo->data_vio_pool, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2034: vdo_drain_hash_zones(vdo->hash_zones, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2038: vdo_drain_flusher(vdo->flusher, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2047: result = vdo_synchronous_flush(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:2049: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/dm-vdo-target.c:2051: vdo_drain_logical_zones(vdo->logical_zones,
drivers/md/dm-vdo/dm-vdo-target.c:2056: vdo_drain_block_map(vdo->block_map, vdo_get_admin_state_code(state),
drivers/md/dm-vdo/dm-vdo-target.c:2061: vdo_drain_recovery_journal(vdo->recovery_journal,
drivers/md/dm-vdo/dm-vdo-target.c:2066: vdo_drain_slab_depot(vdo->depot, vdo_get_admin_state_code(state),
drivers/md/dm-vdo/dm-vdo-target.c:2095: struct vdo *vdo = get_vdo_for_target(ti);
drivers/md/dm-vdo/dm-vdo-target.c:2100: vdo_register_thread_device_id(&instance_thread, &vdo->instance);
drivers/md/dm-vdo/dm-vdo-target.c:2101: device_name = vdo_get_device_name(vdo->device_config->owning_target);
drivers/md/dm-vdo/dm-vdo-target.c:2108: result = perform_admin_operation(vdo, SUSPEND_PHASE_START, suspend_callback,
drivers/md/dm-vdo/dm-vdo-target.c:2119: vdo_get_admin_state(vdo)->name);
drivers/md/dm-vdo/dm-vdo-target.c:2134: static bool was_new(const struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:2136: return (vdo->load_state == VDO_NEW);
drivers/md/dm-vdo/dm-vdo-target.c:2145: static bool __must_check requires_repair(const struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:2147: switch (vdo_get_state(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2165: static enum slab_depot_load_type get_load_type(struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:2167: if (vdo_state_requires_read_only_rebuild(vdo->load_state))
drivers/md/dm-vdo/dm-vdo-target.c:2170: if (vdo_state_requires_recovery(vdo->load_state))
drivers/md/dm-vdo/dm-vdo-target.c:2182: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2185: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:2187: switch (advance_phase(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2189: result = vdo_start_operation(&vdo->admin.state, VDO_ADMIN_STATE_LOADING);
drivers/md/dm-vdo/dm-vdo-target.c:2196: vdo_open_recovery_journal(vdo->recovery_journal, vdo->depot,
drivers/md/dm-vdo/dm-vdo-target.c:2197: vdo->block_map);
drivers/md/dm-vdo/dm-vdo-target.c:2202: vdo_set_dedupe_state_normal(vdo->hash_zones);
drivers/md/dm-vdo/dm-vdo-target.c:2203: if (vdo_is_read_only(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2212: if (requires_repair(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2217: vdo_load_slab_depot(vdo->depot,
drivers/md/dm-vdo/dm-vdo-target.c:2218: (was_new(vdo) ? VDO_ADMIN_STATE_FORMATTING :
drivers/md/dm-vdo/dm-vdo-target.c:2224: vdo_set_state(vdo, VDO_DIRTY);
drivers/md/dm-vdo/dm-vdo-target.c:2225: vdo_save_components(vdo, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2229: vdo_initialize_block_map_from_journal(vdo->block_map,
drivers/md/dm-vdo/dm-vdo-target.c:2230: vdo->recovery_journal);
drivers/md/dm-vdo/dm-vdo-target.c:2231: vdo_prepare_slab_depot_to_allocate(vdo->depot, get_load_type(vdo),
drivers/md/dm-vdo/dm-vdo-target.c:2236: if (vdo_state_requires_recovery(vdo->load_state))
drivers/md/dm-vdo/dm-vdo-target.c:2237: vdo_enter_recovery_mode(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:2239: vdo_scrub_all_unrecovered_slabs(vdo->depot, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2243: WRITE_ONCE(vdo->compressing, vdo->device_config->compression);
drivers/md/dm-vdo/dm-vdo-target.c:2244: if (vdo->device_config->deduplication) {
drivers/md/dm-vdo/dm-vdo-target.c:2249: vdo_start_dedupe_index(vdo->hash_zones, was_new(vdo));
drivers/md/dm-vdo/dm-vdo-target.c:2252: vdo->allocations_allowed = false;
drivers/md/dm-vdo/dm-vdo-target.c:2259: vdo_drain_recovery_journal(vdo->recovery_journal, VDO_ADMIN_STATE_SAVING,
drivers/md/dm-vdo/dm-vdo-target.c:2266: vdo->admin.phase = LOAD_PHASE_FINISHED;
drivers/md/dm-vdo/dm-vdo-target.c:2286: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2289: vdo->thread_config.admin_thread))
drivers/md/dm-vdo/dm-vdo-target.c:2292: if (vdo_state_requires_read_only_rebuild(vdo->load_state) &&
drivers/md/dm-vdo/dm-vdo-target.c:2293: (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) {
drivers/md/dm-vdo/dm-vdo-target.c:2295: vdo->admin.phase = LOAD_PHASE_DRAIN_JOURNAL;
drivers/md/dm-vdo/dm-vdo-target.c:2301: (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) {
drivers/md/dm-vdo/dm-vdo-target.c:2303: vdo->admin.phase = LOAD_PHASE_FINISHED;
drivers/md/dm-vdo/dm-vdo-target.c:2310: vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY;
drivers/md/dm-vdo/dm-vdo-target.c:2311: vdo_enter_read_only_mode(vdo, completion->result);
drivers/md/dm-vdo/dm-vdo-target.c:2322: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2324: switch (vdo_get_state(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2327: vdo_set_state(vdo, VDO_DIRTY);
drivers/md/dm-vdo/dm-vdo-target.c:2328: vdo_save_components(vdo, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2352: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2355: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:2357: switch (advance_phase(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2359: result = vdo_start_operation(&vdo->admin.state,
drivers/md/dm-vdo/dm-vdo-target.c:2374: vdo_resume_hash_zones(vdo->hash_zones, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2378: vdo_resume_slab_depot(vdo->depot, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2382: vdo_resume_recovery_journal(vdo->recovery_journal, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2386: vdo_resume_block_map(vdo->block_map, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2390: vdo_resume_logical_zones(vdo->logical_zones, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2395: bool was_enabled = vdo_get_compressing(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:2396: bool enable = vdo->device_config->compression;
drivers/md/dm-vdo/dm-vdo-target.c:2399: WRITE_ONCE(vdo->compressing, enable);
drivers/md/dm-vdo/dm-vdo-target.c:2402: vdo_resume_packer(vdo->packer, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2407: vdo_resume_flusher(vdo->flusher, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2411: resume_data_vio_pool(vdo->data_vio_pool, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2432: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2435: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:2437: switch (advance_phase(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2439: if (vdo_is_read_only(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2446: result = vdo_start_operation(&vdo->admin.state,
drivers/md/dm-vdo/dm-vdo-target.c:2453: vdo->states.vdo.config.logical_blocks = vdo->block_map->next_entry_count;
drivers/md/dm-vdo/dm-vdo-target.c:2454: vdo_save_components(vdo, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2458: vdo_grow_block_map(vdo->block_map, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2465: vdo_enter_read_only_mode(vdo, completion->result);
drivers/md/dm-vdo/dm-vdo-target.c:2481: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2483: if (vdo->admin.phase == GROW_LOGICAL_PHASE_GROW_BLOCK_MAP) {
drivers/md/dm-vdo/dm-vdo-target.c:2488: vdo->states.vdo.config.logical_blocks = vdo->block_map->entry_count;
drivers/md/dm-vdo/dm-vdo-target.c:2489: vdo_abandon_block_map_growth(vdo->block_map);
drivers/md/dm-vdo/dm-vdo-target.c:2492: vdo->admin.phase = GROW_LOGICAL_PHASE_ERROR;
drivers/md/dm-vdo/dm-vdo-target.c:2506: static int perform_grow_logical(struct vdo *vdo, block_count_t new_logical_blocks)
drivers/md/dm-vdo/dm-vdo-target.c:2510: if (vdo->device_config->logical_blocks == new_logical_blocks) {
drivers/md/dm-vdo/dm-vdo-target.c:2515: vdo_abandon_block_map_growth(vdo->block_map);
drivers/md/dm-vdo/dm-vdo-target.c:2521: if (vdo->block_map->next_entry_count != new_logical_blocks)
drivers/md/dm-vdo/dm-vdo-target.c:2524: result = perform_admin_operation(vdo, GROW_LOGICAL_PHASE_START,
drivers/md/dm-vdo/dm-vdo-target.c:2542: static void partition_to_region(struct partition *partition, struct vdo *vdo,
drivers/md/dm-vdo/dm-vdo-target.c:2545: physical_block_number_t pbn = partition->offset - vdo->geometry.bio_offset;
drivers/md/dm-vdo/dm-vdo-target.c:2548: .bdev = vdo_get_backing_device(vdo),
drivers/md/dm-vdo/dm-vdo-target.c:2561: static void copy_partition(struct vdo *vdo, enum partition_id id,
drivers/md/dm-vdo/dm-vdo-target.c:2565: struct partition *from = vdo_get_known_partition(&vdo->layout, id);
drivers/md/dm-vdo/dm-vdo-target.c:2566: struct partition *to = vdo_get_known_partition(&vdo->next_layout, id);
drivers/md/dm-vdo/dm-vdo-target.c:2568: partition_to_region(from, vdo, &read_region);
drivers/md/dm-vdo/dm-vdo-target.c:2569: partition_to_region(to, vdo, &write_regions[0]);
drivers/md/dm-vdo/dm-vdo-target.c:2570: dm_kcopyd_copy(vdo->partition_copier, &read_region, 1, write_regions, 0,
drivers/md/dm-vdo/dm-vdo-target.c:2582: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:2585: assert_admin_phase_thread(vdo, __func__);
drivers/md/dm-vdo/dm-vdo-target.c:2587: switch (advance_phase(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2589: if (vdo_is_read_only(vdo)) {
drivers/md/dm-vdo/dm-vdo-target.c:2596: result = vdo_start_operation(&vdo->admin.state,
drivers/md/dm-vdo/dm-vdo-target.c:2604: copy_partition(vdo, VDO_RECOVERY_JOURNAL_PARTITION, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2608: copy_partition(vdo, VDO_SLAB_SUMMARY_PARTITION, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2612: vdo_uninitialize_layout(&vdo->layout);
drivers/md/dm-vdo/dm-vdo-target.c:2613: vdo->layout = vdo->next_layout;
drivers/md/dm-vdo/dm-vdo-target.c:2614: vdo_forget(vdo->next_layout.head);
drivers/md/dm-vdo/dm-vdo-target.c:2615: vdo->states.vdo.config.physical_blocks = vdo->layout.size;
drivers/md/dm-vdo/dm-vdo-target.c:2616: vdo_update_slab_depot_size(vdo->depot);
drivers/md/dm-vdo/dm-vdo-target.c:2617: vdo_save_components(vdo, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2621: vdo_use_new_slabs(vdo->depot, completion);
drivers/md/dm-vdo/dm-vdo-target.c:2625: vdo->depot->summary_origin =
drivers/md/dm-vdo/dm-vdo-target.c:2626: vdo_get_known_partition(&vdo->layout,
drivers/md/dm-vdo/dm-vdo-target.c:2628: vdo->recovery_journal->origin =
drivers/md/dm-vdo/dm-vdo-target.c:2629: vdo_get_known_partition(&vdo->layout,
drivers/md/dm-vdo/dm-vdo-target.c:2634: vdo_enter_read_only_mode(vdo, completion->result);
drivers/md/dm-vdo/dm-vdo-target.c:2641: vdo_uninitialize_layout(&vdo->next_layout);
drivers/md/dm-vdo/dm-vdo-target.c:2651: completion->vdo->admin.phase = GROW_PHYSICAL_PHASE_ERROR;
drivers/md/dm-vdo/dm-vdo-target.c:2665: static int perform_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks)
drivers/md/dm-vdo/dm-vdo-target.c:2669: block_count_t old_physical_blocks = vdo->states.vdo.config.physical_blocks;
drivers/md/dm-vdo/dm-vdo-target.c:2675: if (new_physical_blocks != vdo->next_layout.size) {
drivers/md/dm-vdo/dm-vdo-target.c:2681: vdo_uninitialize_layout(&vdo->next_layout);
drivers/md/dm-vdo/dm-vdo-target.c:2682: vdo_abandon_new_slabs(vdo->depot);
drivers/md/dm-vdo/dm-vdo-target.c:2688: vdo_get_known_partition(&vdo->next_layout, VDO_SLAB_DEPOT_PARTITION)->count;
drivers/md/dm-vdo/dm-vdo-target.c:2689: prepared_depot_size = (vdo->depot->new_slabs == NULL) ? 0 : vdo->depot->new_size;
drivers/md/dm-vdo/dm-vdo-target.c:2693: result = perform_admin_operation(vdo, GROW_PHYSICAL_PHASE_START,
drivers/md/dm-vdo/dm-vdo-target.c:2714: static int __must_check apply_new_vdo_configuration(struct vdo *vdo,
drivers/md/dm-vdo/dm-vdo-target.c:2719: result = perform_grow_logical(vdo, config->logical_blocks);
drivers/md/dm-vdo/dm-vdo-target.c:2725: result = perform_grow_physical(vdo, config->physical_blocks);
drivers/md/dm-vdo/dm-vdo-target.c:2732: static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo)
drivers/md/dm-vdo/dm-vdo-target.c:2739: backing_blocks = get_underlying_device_block_count(vdo);
drivers/md/dm-vdo/dm-vdo-target.c:2748: if (vdo_get_admin_state(vdo) == VDO_ADMIN_STATE_PRE_LOADED) {
drivers/md/dm-vdo/dm-vdo-target.c:2750: result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback,
drivers/md/dm-vdo/dm-vdo-target.c:2758: vdo->suspend_type = VDO_ADMIN_STATE_SUSPENDING;
drivers/md/dm-vdo/dm-vdo-target.c:2759: perform_admin_operation(vdo, SUSPEND_PHASE_START,
drivers/md/dm-vdo/dm-vdo-target.c:2772: vdo->suspend_type = VDO_ADMIN_STATE_STOPPING;
drivers/md/dm-vdo/dm-vdo-target.c:2773: perform_admin_operation(vdo, SUSPEND_PHASE_START,
drivers/md/dm-vdo/dm-vdo-target.c:2786: result = apply_new_vdo_configuration(vdo, config);
drivers/md/dm-vdo/dm-vdo-target.c:2793: vdo->device_config = config;
drivers/md/dm-vdo/dm-vdo-target.c:2804: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/dm-vdo-target.c:2808: if (vdo_get_admin_state(vdo)->normal) {
drivers/md/dm-vdo/dm-vdo-target.c:2813: result = perform_admin_operation(vdo, RESUME_PHASE_START, resume_callback,
drivers/md/dm-vdo/dm-vdo-target.c:2831: struct vdo *vdo = get_vdo_for_target(ti);
drivers/md/dm-vdo/dm-vdo-target.c:2834: vdo_register_thread_device_id(&instance_thread, &vdo->instance);
drivers/md/dm-vdo/dm-vdo-target.c:2835: result = vdo_preresume_registered(ti, vdo);
drivers/md/dm-vdo/dm-vdo-target.c:890: static struct vdo *get_vdo_for_target(struct dm_target *ti)
drivers/md/dm-vdo/dm-vdo-target.c:892: return ((struct device_config *) ti->private)->vdo;
drivers/md/dm-vdo/dm-vdo-target.c:898: struct vdo *vdo = get_vdo_for_target(ti);
drivers/md/dm-vdo/dm-vdo-target.c:900: const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state);
drivers/md/dm-vdo/dm-vdo-target.c:906: vdo_count_bios(&vdo->stats.bios_in, bio);
drivers/md/dm-vdo/dm-vdo-target.c:911: vdo_launch_flush(vdo, bio);
drivers/md/dm-vdo/dm-vdo-target.c:918: (vdo == vdo_get_work_queue_owner(current_work_queue)->vdo));
drivers/md/dm-vdo/dm-vdo-target.c:919: vdo_launch_bio(vdo->data_vio_pool, bio);
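
The hits at dm-vdo-target.c:890-919 sketch the bio entry point: resolve the vdo from the target's private device_config, account the incoming bio, and route flushes to the flusher while everything else goes to the data_vio pool. A sketch under the assumptions that empty flushes are detected via REQ_PREFLUSH with no payload and that the handler returns DM_MAPIO_SUBMITTED:

static int vdo_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct vdo *vdo = ((struct device_config *) ti->private)->vdo;

	vdo_count_bios(&vdo->stats.bios_in, bio);

	/* An empty flush carries no data; hand it straight to the flusher. */
	if ((bio->bi_opf & REQ_PREFLUSH) && (bio->bi_iter.bi_size == 0))
		vdo_launch_flush(vdo, bio);
	else
		vdo_launch_bio(vdo->data_vio_pool, bio);

	return DM_MAPIO_SUBMITTED;
}
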
drivers/md/dm-vdo/dm-vdo-target.c:925: struct vdo *vdo = get_vdo_for_target(ti);
drivers/md/dm-vdo/dm-vdo-target.c:927: limits->logical_block_size = vdo->device_config->logical_block_size;
drivers/md/dm-vdo/dm-vdo-target.c:949: (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK);
drivers/md/dm-vdo/dm-vdo-target.c:976: struct vdo *vdo = get_vdo_for_target(ti);
drivers/md/dm-vdo/dm-vdo-target.c:985: mutex_lock(&vdo->stats_mutex);
drivers/md/dm-vdo/dm-vdo-target.c:986: vdo_fetch_statistics(vdo, &vdo->stats_buffer);
drivers/md/dm-vdo/dm-vdo-target.c:987: stats = &vdo->stats_buffer;
drivers/md/dm-vdo/dm-vdo-target.c:990: vdo_get_backing_device(vdo), stats->mode,
drivers/md/dm-vdo/dm-vdo-target.c:992: vdo_get_dedupe_index_state_name(vdo->hash_zones),
drivers/md/dm-vdo/dm-vdo-target.c:993: vdo_get_compressing(vdo) ? "online" : "offline",
drivers/md/dm-vdo/dm-vdo-target.c:996: mutex_unlock(&vdo->stats_mutex);
drivers/md/dm-vdo/dump.c:130: int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why)
drivers/md/dm-vdo/dump.c:138: do_dump(vdo, dump_options_requested, why);
drivers/md/dm-vdo/dump.c:143: void vdo_dump_all(struct vdo *vdo, const char *why)
drivers/md/dm-vdo/dump.c:145: do_dump(vdo, ~0, why);
drivers/md/dm-vdo/dump.c:55: static void do_dump(struct vdo *vdo, unsigned int dump_options_requested,
drivers/md/dm-vdo/dump.c:62: active = get_data_vio_pool_active_requests(vdo->data_vio_pool);
drivers/md/dm-vdo/dump.c:63: maximum = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
drivers/md/dm-vdo/dump.c:64: outstanding = (atomic64_read(&vdo->stats.bios_submitted) -
drivers/md/dm-vdo/dump.c:65: atomic64_read(&vdo->stats.bios_completed));
drivers/md/dm-vdo/dump.c:68: vdo_get_device_name(vdo->device_config->owning_target));
drivers/md/dm-vdo/dump.c:69: if (((dump_options_requested & FLAG_SHOW_QUEUES) != 0) && (vdo->threads != NULL)) {
drivers/md/dm-vdo/dump.c:72: for (id = 0; id < vdo->thread_config.thread_count; id++)
drivers/md/dm-vdo/dump.c:73: vdo_dump_work_queue(vdo->threads[id].queue);
drivers/md/dm-vdo/dump.c:76: vdo_dump_hash_zones(vdo->hash_zones);
drivers/md/dm-vdo/dump.c:77: dump_data_vio_pool(vdo->data_vio_pool,
drivers/md/dm-vdo/dump.c:80: vdo_dump_status(vdo);
drivers/md/dm-vdo/dump.h:11: int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why);
drivers/md/dm-vdo/dump.h:13: void vdo_dump_all(struct vdo *vdo, const char *why);
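
These dump entry points are wired to dmsetup message, per the dm-vdo-target.c hits at 1058-1062 and 1936-1937: "dump" dumps immediately, while "dump-on-shutdown" arms a flag that triggers vdo_dump_all() when the device is torn down. A sketch of that dispatch; the strcasecmp choice and function name are assumptions:

static int process_dump_message(struct vdo *vdo, unsigned int argc, char **argv)
{
	if (strcasecmp(argv[0], "dump") == 0)
		return vdo_dump(vdo, argc, argv, "dmsetup message");

	if (strcasecmp(argv[0], "dump-on-shutdown") == 0) {
		vdo->dump_on_shutdown = true;	/* consumed at device shutdown */
		return 0;
	}

	return -EINVAL;
}
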
drivers/md/dm-vdo/encodings.c:1321: decode_vdo_component(buffer, offset, &states->vdo);
drivers/md/dm-vdo/encodings.c:1324: states->vdo.config.physical_blocks, &states->layout);
drivers/md/dm-vdo/encodings.c:1391: if (geometry_nonce != states->vdo.nonce) {
drivers/md/dm-vdo/encodings.c:1395: (unsigned long long) states->vdo.nonce);
drivers/md/dm-vdo/encodings.c:1398: return vdo_validate_config(&states->vdo.config, physical_size, logical_size);
drivers/md/dm-vdo/encodings.c:1413: encode_vdo_component(buffer, offset, states->vdo);
drivers/md/dm-vdo/encodings.h:687: struct vdo_component vdo;
drivers/md/dm-vdo/flush.c:117: vdo_initialize_completion(&flush->completion, flusher->vdo,
drivers/md/dm-vdo/flush.c:135: int vdo_make_flusher(struct vdo *vdo)
drivers/md/dm-vdo/flush.c:137: int result = vdo_allocate(1, struct flusher, __func__, &vdo->flusher);
drivers/md/dm-vdo/flush.c:142: vdo->flusher->vdo = vdo;
drivers/md/dm-vdo/flush.c:143: vdo->flusher->thread_id = vdo->thread_config.packer_thread;
drivers/md/dm-vdo/flush.c:144: vdo_set_admin_state_code(&vdo->flusher->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
drivers/md/dm-vdo/flush.c:145: vdo_initialize_completion(&vdo->flusher->completion, vdo,
drivers/md/dm-vdo/flush.c:148: spin_lock_init(&vdo->flusher->lock);
drivers/md/dm-vdo/flush.c:149: bio_list_init(&vdo->flusher->waiting_flush_bios);
drivers/md/dm-vdo/flush.c:150: vdo->flusher->flush_pool = mempool_create(1, allocate_flush, free_flush,
drivers/md/dm-vdo/flush.c:151: vdo->flusher);
drivers/md/dm-vdo/flush.c:152: return ((vdo->flusher->flush_pool == NULL) ? -ENOMEM : VDO_SUCCESS);
drivers/md/dm-vdo/flush.c:216: vdo_increment_packer_flush_generation(flusher->vdo->packer);
drivers/md/dm-vdo/flush.c:255: flusher->logical_zone_to_notify = &flusher->vdo->logical_zones->zones[0];
drivers/md/dm-vdo/flush.c:26: struct vdo *vdo;
drivers/md/dm-vdo/flush.c:270: struct flusher *flusher = completion->vdo->flusher;
drivers/md/dm-vdo/flush.c:278: vdo_enter_read_only_mode(flusher->vdo, result);
drivers/md/dm-vdo/flush.c:321: for (zone = &flusher->vdo->logical_zones->zones[0]; zone != NULL; zone = zone->next)
drivers/md/dm-vdo/flush.c:369: static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)
drivers/md/dm-vdo/flush.c:372: bio_list_merge_init(&flush->bios, &vdo->flusher->waiting_flush_bios);
drivers/md/dm-vdo/flush.c:380: completion->vdo->thread_config.packer_thread, NULL);
drivers/md/dm-vdo/flush.c:392: void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
drivers/md/dm-vdo/flush.c:398: struct vdo_flush *flush = mempool_alloc(vdo->flusher->flush_pool, GFP_NOWAIT);
drivers/md/dm-vdo/flush.c:399: struct flusher *flusher = vdo->flusher;
drivers/md/dm-vdo/flush.c:416: initialize_flush(flush, vdo);
drivers/md/dm-vdo/flush.c:434: struct flusher *flusher = flush->completion.vdo->flusher;
drivers/md/dm-vdo/flush.c:441: initialize_flush(flush, flusher->vdo);
drivers/md/dm-vdo/flush.c:463: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/flush.c:471: vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
drivers/md/dm-vdo/flush.c:474: bio_set_dev(bio, vdo_get_backing_device(vdo));
drivers/md/dm-vdo/flush.c:475: atomic64_inc(&vdo->stats.flush_out);
drivers/md/dm-vdo/flush.c:493: struct vdo *vdo = flusher->vdo;
drivers/md/dm-vdo/flush.c:494: zone_count_t bio_threads = flusher->vdo->thread_config.bio_thread_count;
drivers/md/dm-vdo/flush.c:498: return vdo->thread_config.bio_threads[0];
drivers/md/dm-vdo/flush.c:500: interval = vdo->device_config->thread_counts.bio_rotation_interval;
drivers/md/dm-vdo/flush.c:508: return vdo->thread_config.bio_threads[flusher->bio_queue_rotor];
drivers/md/dm-vdo/flush.c:521: select_bio_queue(completion->vdo->flusher), NULL);
drivers/md/dm-vdo/flush.h:28: int __must_check vdo_make_flusher(struct vdo *vdo);
drivers/md/dm-vdo/flush.h:38: void vdo_launch_flush(struct vdo *vdo, struct bio *bio);
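
flush.c:135-152 builds the flusher around a single-entry mempool on the packer thread, and flush.c:392-416 services each flush bio from that pool. A sketch of the launch path, assuming the flusher's spinlock guards waiting_flush_bios and that a failed GFP_NOWAIT allocation simply leaves the bio queued for the next flush to adopt:

void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
{
	struct flusher *flusher = vdo->flusher;
	struct vdo_flush *flush = mempool_alloc(flusher->flush_pool, GFP_NOWAIT);

	spin_lock(&flusher->lock);
	bio_list_add(&flusher->waiting_flush_bios, bio);
	if (flush != NULL)
		initialize_flush(flush, vdo);	/* adopts all waiting flush bios */
	spin_unlock(&flusher->lock);
}
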
drivers/md/dm-vdo/io-submitter.c:109: struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/io-submitter.c:112: atomic64_inc(&vdo->stats.bios_submitted);
drivers/md/dm-vdo/io-submitter.c:114: bio_set_dev(bio, vdo_get_backing_device(vdo));
drivers/md/dm-vdo/io-submitter.c:142: struct io_submitter *submitter = vio->completion.vdo->io_submitter;
drivers/md/dm-vdo/io-submitter.c:275: struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/io-submitter.c:277: &vdo->io_submitter->bio_queue_data[vio->bio_zone];
drivers/md/dm-vdo/io-submitter.c:348: const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
drivers/md/dm-vdo/io-submitter.c:379: unsigned int max_requests_active, struct vdo *vdo,
drivers/md/dm-vdo/io-submitter.c:421: result = vdo_make_thread(vdo, vdo->thread_config.bio_threads[i],
drivers/md/dm-vdo/io-submitter.c:435: bio_queue_data->queue = vdo->threads[vdo->thread_config.bio_threads[i]].queue;
drivers/md/dm-vdo/io-submitter.c:76: struct atomic_statistics *stats = &vio->completion.vdo->stats;
drivers/md/dm-vdo/io-submitter.h:17: unsigned int max_requests_active, struct vdo *vdo,
drivers/md/dm-vdo/logical-zone.c:102: zones->vdo = vdo;
drivers/md/dm-vdo/logical-zone.c:113: vdo->thread_config.admin_thread, zones, NULL,
drivers/md/dm-vdo/logical-zone.c:114: vdo, &zones->manager);
drivers/md/dm-vdo/logical-zone.c:277: vdo_complete_flushes(zone->zones->vdo->flusher);
drivers/md/dm-vdo/logical-zone.c:302: vdo_get_flusher_thread_id(zone->zones->vdo->flusher));
drivers/md/dm-vdo/logical-zone.c:54: struct vdo *vdo = zones->vdo;
drivers/md/dm-vdo/logical-zone.c:62: if (zone_number < vdo->thread_config.logical_zone_count - 1)
drivers/md/dm-vdo/logical-zone.c:65: vdo_initialize_completion(&zone->completion, vdo,
drivers/md/dm-vdo/logical-zone.c:69: zone->thread_id = vdo->thread_config.logical_threads[zone_number];
drivers/md/dm-vdo/logical-zone.c:70: zone->block_map_zone = &vdo->block_map->zones[zone_number];
drivers/md/dm-vdo/logical-zone.c:74: allocation_zone_number = zone->thread_id % vdo->thread_config.physical_zone_count;
drivers/md/dm-vdo/logical-zone.c:75: zone->allocation_zone = &vdo->physical_zones->zones[allocation_zone_number];
drivers/md/dm-vdo/logical-zone.c:77: return vdo_make_default_thread(vdo, zone->thread_id);
drivers/md/dm-vdo/logical-zone.c:87: int vdo_make_logical_zones(struct vdo *vdo, struct logical_zones **zones_ptr)
drivers/md/dm-vdo/logical-zone.c:92: zone_count_t zone_count = vdo->thread_config.logical_zone_count;
drivers/md/dm-vdo/logical-zone.h:57: struct vdo *vdo;
drivers/md/dm-vdo/logical-zone.h:66: int __must_check vdo_make_logical_zones(struct vdo *vdo,
drivers/md/dm-vdo/message-stats.c:418: int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen)
drivers/md/dm-vdo/message-stats.c:429: vdo_fetch_statistics(vdo, stats);
drivers/md/dm-vdo/message-stats.c:461: int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen)
drivers/md/dm-vdo/message-stats.c:463: struct vdo_config *config = &vdo->states.vdo.config;
drivers/md/dm-vdo/message-stats.c:477: write_index_config(&vdo->geometry.index_config, buf, maxlen);
drivers/md/dm-vdo/message-stats.h:11: int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen);
drivers/md/dm-vdo/message-stats.h:12: int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen);
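
Both the dmsetup status path (dm-vdo-target.c:985-996) and these message-stats writers go through vdo_fetch_statistics(), and the status path serializes access to a cached buffer under stats_mutex. A sketch of that locked fetch; the helper name is hypothetical and the formatting of the output is elided:

static void fetch_stats_locked(struct vdo *vdo)
{
	mutex_lock(&vdo->stats_mutex);
	vdo_fetch_statistics(vdo, &vdo->stats_buffer);	/* refresh the cache */
	/* The caller formats vdo->stats_buffer (e.g. its mode string) here. */
	mutex_unlock(&vdo->stats_mutex);
}
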
drivers/md/dm-vdo/packer.c:143: int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, struct packer **packer_ptr)
drivers/md/dm-vdo/packer.c:153: packer->thread_id = vdo->thread_config.packer_thread;
drivers/md/dm-vdo/packer.c:178: result = vdo_make_default_thread(vdo, packer->thread_id);
drivers/md/dm-vdo/packer.h:101: int __must_check vdo_make_packer(struct vdo *vdo, block_count_t bin_count,
drivers/md/dm-vdo/physical-zone.c:326: static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
drivers/md/dm-vdo/physical-zone.c:343: zone->thread_id = vdo->thread_config.physical_threads[zone_number];
drivers/md/dm-vdo/physical-zone.c:344: zone->allocator = &vdo->depot->allocators[zone_number];
drivers/md/dm-vdo/physical-zone.c:345: zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
drivers/md/dm-vdo/physical-zone.c:346: result = vdo_make_default_thread(vdo, zone->thread_id);
drivers/md/dm-vdo/physical-zone.c:362: int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr)
drivers/md/dm-vdo/physical-zone.c:366: zone_count_t zone_count = vdo->thread_config.physical_zone_count;
drivers/md/dm-vdo/physical-zone.c:377: result = initialize_zone(vdo, zones);
drivers/md/dm-vdo/physical-zone.h:94: int __must_check vdo_make_physical_zones(struct vdo *vdo,
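
physical-zone.c:343-346 shows the per-zone wiring almost in full: each zone is pinned to its physical thread, bound to the matching slab-depot allocator, and linked to its successor modulo the zone count so the zones form a ring. Reassembled below; note the listed signature at :326 takes only vdo and zones, so the explicit zone_number parameter here is a simplification, and fields not shown in the listing are omitted:

static int initialize_zone(struct vdo *vdo, struct physical_zones *zones,
			   zone_count_t zone_number)
{
	struct physical_zone *zone = &zones->zones[zone_number];

	zone->thread_id = vdo->thread_config.physical_threads[zone_number];
	zone->allocator = &vdo->depot->allocators[zone_number];
	/* Wrap around so allocation can rotate through every zone. */
	zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
	return vdo_make_default_thread(vdo, zone->thread_id);
}
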
drivers/md/dm-vdo/recovery-journal.c:323: return vdo_is_read_only(journal->flush_vio->completion.vdo);
drivers/md/dm-vdo/recovery-journal.c:397: vdo_enter_read_only_mode(journal->flush_vio->completion.vdo, error_code);
drivers/md/dm-vdo/recovery-journal.c:590: struct vdo *vdo)
drivers/md/dm-vdo/recovery-journal.c:593: struct thread_config *config = &vdo->thread_config;
drivers/md/dm-vdo/recovery-journal.c:625: vdo_initialize_completion(&counter->completion, vdo,
drivers/md/dm-vdo/recovery-journal.c:658: static int initialize_recovery_block(struct vdo *vdo, struct recovery_journal *journal,
drivers/md/dm-vdo/recovery-journal.c:679: result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
drivers/md/dm-vdo/recovery-journal.c:706: struct vdo *vdo, struct partition *partition,
drivers/md/dm-vdo/recovery-journal.c:725: journal->thread_id = vdo->thread_config.journal_thread;
drivers/md/dm-vdo/recovery-journal.c:742: result = initialize_recovery_block(vdo, journal, block);
drivers/md/dm-vdo/recovery-journal.c:749: result = initialize_lock_counter(journal, vdo);
drivers/md/dm-vdo/recovery-journal.c:755: result = create_metadata_vio(vdo, VIO_TYPE_RECOVERY_JOURNAL, VIO_PRIORITY_HIGH,
drivers/md/dm-vdo/recovery-journal.c:762: result = vdo_register_read_only_listener(vdo, journal,
drivers/md/dm-vdo/recovery-journal.c:770: result = vdo_make_default_thread(vdo, journal->thread_id);
drivers/md/dm-vdo/recovery-journal.h:255: nonce_t nonce, struct vdo *vdo,
drivers/md/dm-vdo/repair.c:1041: &repair->completion.vdo->block_map->zones[0], pbn, true,
drivers/md/dm-vdo/repair.c:1099: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/repair.c:1103: vdo_assert_on_logical_zone_thread(vdo, 0, __func__);
drivers/md/dm-vdo/repair.c:1106: vdo->block_map->zones[0].page_cache.rebuilding =
drivers/md/dm-vdo/repair.c:1107: vdo_state_requires_read_only_rebuild(vdo->load_state);
drivers/md/dm-vdo/repair.c:1224: struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
drivers/md/dm-vdo/repair.c:1275: static bool unpack_entry(struct vdo *vdo, char *packed, enum vdo_metadata_type format,
drivers/md/dm-vdo/repair.c:1309: return (validate_recovery_journal_entry(vdo, entry) == VDO_SUCCESS);
drivers/md/dm-vdo/repair.c:1326: struct vdo *vdo = repair->completion.vdo;
drivers/md/dm-vdo/repair.c:1334: if (!unpack_entry(vdo, entries, format, &entry))
drivers/md/dm-vdo/repair.c:1406: struct vdo *vdo = repair->completion.vdo;
drivers/md/dm-vdo/repair.c:1407: struct recovery_journal *journal = vdo->recovery_journal;
drivers/md/dm-vdo/repair.c:1456: struct vdo *vdo = repair->completion.vdo;
drivers/md/dm-vdo/repair.c:1476: result = validate_recovery_journal_entry(vdo, &entry);
drivers/md/dm-vdo/repair.c:1478: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/repair.c:1495: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/repair.c:1517: struct vdo *vdo = repair->completion.vdo;
drivers/md/dm-vdo/repair.c:1518: struct recovery_journal *journal = vdo->recovery_journal;
drivers/md/dm-vdo/repair.c:1531: result = validate_recovery_journal_entry(vdo, &entry);
drivers/md/dm-vdo/repair.c:1533: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/repair.c:1557: struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
drivers/md/dm-vdo/repair.c:1583: vdo_enter_read_only_mode(repair->completion.vdo, VDO_CORRUPT_JOURNAL);
drivers/md/dm-vdo/repair.c:1653: return (vdo_state_requires_read_only_rebuild(repair->completion.vdo->load_state) ?
drivers/md/dm-vdo/repair.c:1684: struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/repair.c:1686: continue_vio_after_io(vio, finish_journal_load, vdo->thread_config.admin_thread);
drivers/md/dm-vdo/repair.c:1698: struct vdo *vdo = parent->vdo;
drivers/md/dm-vdo/repair.c:1699: struct recovery_journal *journal = vdo->recovery_journal;
drivers/md/dm-vdo/repair.c:1704: vdo->device_config->cache_size >> 1,
drivers/md/dm-vdo/repair.c:1707: vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/repair.c:1709: if (vdo->load_state == VDO_FORCE_REBUILD) {
drivers/md/dm-vdo/repair.c:1711: vdo->states.vdo.read_only_recoveries++;
drivers/md/dm-vdo/repair.c:1712: } else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) {
drivers/md/dm-vdo/repair.c:1726: vdo_initialize_completion(&repair->completion, vdo, VDO_REPAIR_COMPLETION);
drivers/md/dm-vdo/repair.c:1746: result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
drivers/md/dm-vdo/repair.c:207: const struct thread_config *thread_config = &completion->vdo->thread_config;
drivers/md/dm-vdo/repair.c:242: repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;
drivers/md/dm-vdo/repair.c:253: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/repair.c:256: vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/repair.c:258: if (vdo->load_state != VDO_REBUILD_FOR_UPGRADE)
drivers/md/dm-vdo/repair.c:259: vdo->states.vdo.complete_recoveries++;
drivers/md/dm-vdo/repair.c:261: vdo_initialize_recovery_journal_post_repair(vdo->recovery_journal,
drivers/md/dm-vdo/repair.c:262: vdo->states.vdo.complete_recoveries,
drivers/md/dm-vdo/repair.c:268: if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
drivers/md/dm-vdo/repair.c:281: vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot));
drivers/md/dm-vdo/repair.c:294: if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state))
drivers/md/dm-vdo/repair.c:326: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/repair.c:330: vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/repair.c:333: if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
drivers/md/dm-vdo/repair.c:342: vdo_drain_slab_depot(vdo->depot, operation, completion);
drivers/md/dm-vdo/repair.c:353: vdo_assert_on_admin_thread(completion->vdo, __func__);
drivers/md/dm-vdo/repair.c:358: vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING,
drivers/md/dm-vdo/repair.c:423: struct slab_depot *depot = completion->vdo->depot;
drivers/md/dm-vdo/repair.c:527: if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))
drivers/md/dm-vdo/repair.c:545: struct block_map *block_map = repair->completion.vdo->block_map;
drivers/md/dm-vdo/repair.c:577: struct block_map *map = completion->vdo->block_map;
drivers/md/dm-vdo/repair.c:616: struct slab_depot *depot = completion->vdo->depot;
drivers/md/dm-vdo/repair.c:640: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/repair.c:641: struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache;
drivers/md/dm-vdo/repair.c:644: if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair))
drivers/md/dm-vdo/repair.c:655: vdo_traverse_forest(vdo->block_map, process_entry, completion);
drivers/md/dm-vdo/repair.c:738: sector = get_sector(repair->completion.vdo->recovery_journal,
drivers/md/dm-vdo/repair.c:751: static int validate_recovery_journal_entry(const struct vdo *vdo,
drivers/md/dm-vdo/repair.c:754: if ((entry->slot.pbn >= vdo->states.vdo.config.physical_blocks) ||
drivers/md/dm-vdo/repair.c:758: !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
drivers/md/dm-vdo/repair.c:759: !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) {
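
repair.c:751-759 gives most of the journal-entry validity test: an entry is rejected when its block-map slot or either side of the mapping lies outside the physical space. A reconstruction; conditions on lines not captured in this listing (such as a slot-index bound) are omitted, and the error code follows the VDO_CORRUPT_JOURNAL use at repair.c:1583:

static int validate_recovery_journal_entry(const struct vdo *vdo,
					   const struct recovery_journal_entry *entry)
{
	if ((entry->slot.pbn >= vdo->states.vdo.config.physical_blocks) ||
	    !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
	    !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn))
		return VDO_CORRUPT_JOURNAL;

	return VDO_SUCCESS;
}
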
drivers/md/dm-vdo/repair.c:797: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/repair.c:798: struct recovery_journal *journal = vdo->recovery_journal;
drivers/md/dm-vdo/repair.c:815: result = validate_recovery_journal_entry(vdo, &entry);
drivers/md/dm-vdo/repair.c:817: vdo_enter_read_only_mode(vdo, result);
drivers/md/dm-vdo/repair.c:830: slab = vdo_get_slab(vdo->depot, pbn);
drivers/md/dm-vdo/repair.c:855: struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/repair.c:857: vdo_assert_on_physical_zone_thread(vdo, allocator->zone_number, __func__);
drivers/md/dm-vdo/repair.c:860: repair->logical_blocks_used = vdo->recovery_journal->logical_blocks_used;
drivers/md/dm-vdo/repair.c:861: repair->block_map_data_blocks = vdo->recovery_journal->block_map_data_blocks;
drivers/md/dm-vdo/repair.c:888: vdo_assert_on_admin_thread(completion->vdo, __func__);
drivers/md/dm-vdo/repair.c:890: if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) {
drivers/md/dm-vdo/repair.c:899: vdo_load_slab_depot(completion->vdo->depot, operation, completion, repair);
drivers/md/dm-vdo/repair.c:907: vdo_assert_on_admin_thread(completion->vdo, __func__);
drivers/md/dm-vdo/repair.c:911: operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ?
drivers/md/dm-vdo/repair.c:914: vdo_drain_block_map(completion->vdo->block_map, operation, completion);
drivers/md/dm-vdo/slab-depot.c
1024
vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
1045
if (vdo_is_read_only(slab->allocator->depot->vdo))
drivers/md/dm-vdo/slab-depot.c
1087
if (vdo_is_read_only(completion->vdo)) {
drivers/md/dm-vdo/slab-depot.c
1175
vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
187
read_only = vdo_is_read_only(slab->allocator->depot->vdo);
drivers/md/dm-vdo/slab-depot.c
240
(vdo_is_read_only(allocator->depot->vdo) ?
drivers/md/dm-vdo/slab-depot.c
252
int result = (vdo_is_read_only(allocator->depot->vdo) ?
drivers/md/dm-vdo/slab-depot.c
2640
if (vdo_is_read_only(allocator->depot->vdo))
drivers/md/dm-vdo/slab-depot.c
2649
vdo_enter_read_only_mode(allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
2672
if (allocator->depot->vdo->suspend_type == VDO_ADMIN_STATE_SAVING)
drivers/md/dm-vdo/slab-depot.c
2770
atomic_cmpxchg(&allocator->depot->vdo->state, VDO_RECOVERING,
drivers/md/dm-vdo/slab-depot.c
2835
vdo_enter_read_only_mode(scrubber->vio.completion.vdo, result);
drivers/md/dm-vdo/slab-depot.c
299
vdo_enter_read_only_mode(completion->vdo, completion->result);
drivers/md/dm-vdo/slab-depot.c
3030
if (vdo_is_read_only(completion->vdo)) {
drivers/md/dm-vdo/slab-depot.c
330
if (vdo_is_read_only(depot->vdo)) {
drivers/md/dm-vdo/slab-depot.c
3309
if (vdo_is_read_only(allocator->depot->vdo))
drivers/md/dm-vdo/slab-depot.c
3328
struct vdo_slab *slab = vdo_get_slab(completion->vdo->depot, updater->zpbn.pbn);
drivers/md/dm-vdo/slab-depot.c
3335
if (vdo_is_read_only(completion->vdo)) {
drivers/md/dm-vdo/slab-depot.c
3506
pbn = slab->journal_origin - depot->vdo->geometry.bio_offset;
drivers/md/dm-vdo/slab-depot.c
3508
.bdev = vdo_get_backing_device(depot->vdo),
drivers/md/dm-vdo/slab-depot.c
372
if (vdo_is_read_only(block->vio.completion.vdo)) {
drivers/md/dm-vdo/slab-depot.c
3760
journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal;
drivers/md/dm-vdo/slab-depot.c
3933
vdo_is_read_only(journal->slab->allocator->depot->vdo))
drivers/md/dm-vdo/slab-depot.c
4019
result = allocate_vio_components(allocator->completion.vdo,
drivers/md/dm-vdo/slab-depot.c
4052
result = allocate_vio_components(allocator->depot->vdo, VIO_TYPE_SLAB_SUMMARY,
drivers/md/dm-vdo/slab-depot.c
4070
struct vdo *vdo = depot->vdo;
drivers/md/dm-vdo/slab-depot.c
4078
.thread_id = vdo->thread_config.physical_threads[zone],
drivers/md/dm-vdo/slab-depot.c
4079
.nonce = vdo->states.vdo.nonce,
drivers/md/dm-vdo/slab-depot.c
4084
result = vdo_register_read_only_listener(vdo, allocator,
drivers/md/dm-vdo/slab-depot.c
4090
vdo_initialize_completion(&allocator->completion, vdo, VDO_BLOCK_ALLOCATOR_COMPLETION);
drivers/md/dm-vdo/slab-depot.c
4091
result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, 1, allocator->thread_id,
drivers/md/dm-vdo/slab-depot.c
4102
result = make_vio_pool(vdo, BLOCK_ALLOCATOR_REFCOUNT_VIO_POOL_SIZE,
drivers/md/dm-vdo/slab-depot.c
4161
const struct thread_config *thread_config = &depot->vdo->thread_config;
drivers/md/dm-vdo/slab-depot.c
4166
depot->vdo, &depot->action_manager);
drivers/md/dm-vdo/slab-depot.c
4245
int vdo_decode_slab_depot(struct slab_depot_state_2_0 state, struct vdo *vdo,
drivers/md/dm-vdo/slab-depot.c
4266
vdo->thread_config.physical_zone_count,
drivers/md/dm-vdo/slab-depot.c
4271
depot->vdo = vdo;
drivers/md/dm-vdo/slab-depot.c
4273
depot->zone_count = vdo->thread_config.physical_zone_count;
drivers/md/dm-vdo/slab-depot.c
431
vdo_enter_read_only_mode(completion->vdo, completion->result);
drivers/md/dm-vdo/slab-depot.c
4441
vdo_enter_read_only_mode(depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
4555
struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/slab-depot.c
4558
vdo->thread_config.admin_thread);
drivers/md/dm-vdo/slab-depot.c
4608
struct slab_depot *depot = completion->vdo->depot;
drivers/md/dm-vdo/slab-depot.c
4622
struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/slab-depot.c
4625
vdo->thread_config.admin_thread);
drivers/md/dm-vdo/slab-depot.c
4643
result = create_multi_block_metadata_vio(depot->vdo, VIO_TYPE_SLAB_SUMMARY,
drivers/md/dm-vdo/slab-depot.c
476
vdo_is_read_only(journal->slab->allocator->depot->vdo)) {
drivers/md/dm-vdo/slab-depot.c
5028
if (vdo_is_read_only(depot->vdo)) {
drivers/md/dm-vdo/slab-depot.c
577
vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
629
vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
drivers/md/dm-vdo/slab-depot.c
709
vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
793
(vdo_is_read_only(journal->slab->allocator->depot->vdo) ?
drivers/md/dm-vdo/slab-depot.c
815
if (vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
drivers/md/dm-vdo/slab-depot.c
918
vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c
927
vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo,
drivers/md/dm-vdo/slab-depot.h
464
struct vdo *vdo;
drivers/md/dm-vdo/slab-depot.h
555
struct vdo *vdo,
drivers/md/dm-vdo/types.h
211
struct vdo *vdo;
drivers/md/dm-vdo/types.h
314
struct vdo *vdo;
drivers/md/dm-vdo/types.h
333
struct vdo;
drivers/md/dm-vdo/vdo.c
101
static struct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter,
drivers/md/dm-vdo/vdo.c
1026
struct vdo *vdo = listener;
drivers/md/dm-vdo/vdo.c
1028
if (vdo_in_read_only_mode(vdo))
drivers/md/dm-vdo/vdo.c
1031
vdo_set_state(vdo, VDO_READ_ONLY_MODE);
drivers/md/dm-vdo/vdo.c
1032
vdo_save_components(vdo, parent);
drivers/md/dm-vdo/vdo.c
104
struct vdo *vdo;
drivers/md/dm-vdo/vdo.c
1041
int vdo_enable_read_only_entry(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1044
bool is_read_only = vdo_in_read_only_mode(vdo);
drivers/md/dm-vdo/vdo.c
1045
struct read_only_notifier *notifier = &vdo->read_only_notifier;
drivers/md/dm-vdo/vdo.c
1055
vdo_initialize_completion(&notifier->completion, vdo,
drivers/md/dm-vdo/vdo.c
1058
for (id = 0; id < vdo->thread_config.thread_count; id++)
drivers/md/dm-vdo/vdo.c
1059
vdo->threads[id].is_read_only = is_read_only;
drivers/md/dm-vdo/vdo.c
106
list_for_each_entry(vdo, &registry.links, registration) {
drivers/md/dm-vdo/vdo.c
1061
return vdo_register_read_only_listener(vdo, vdo, notify_vdo_of_read_only_mode,
drivers/md/dm-vdo/vdo.c
1062
vdo->thread_config.admin_thread);
drivers/md/dm-vdo/vdo.c
107
if (filter(vdo, context))
drivers/md/dm-vdo/vdo.c
1075
struct vdo *vdo = parent->vdo;
drivers/md/dm-vdo/vdo.c
1076
struct read_only_notifier *notifier = &vdo->read_only_notifier;
drivers/md/dm-vdo/vdo.c
1078
vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/vdo.c
108
return vdo;
drivers/md/dm-vdo/vdo.c
1122
vdo_assert_on_admin_thread(completion->vdo, __func__);
drivers/md/dm-vdo/vdo.c
1139
struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/vdo.c
1146
struct vdo_thread *thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
1168
if (++thread_id == vdo->thread_config.dedupe_thread) {
drivers/md/dm-vdo/vdo.c
1176
if (thread_id >= vdo->thread_config.thread_count) {
drivers/md/dm-vdo/vdo.c
1180
vdo->thread_config.admin_thread, NULL);
drivers/md/dm-vdo/vdo.c
119
struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context)
drivers/md/dm-vdo/vdo.c
1203
struct vdo *vdo = parent->vdo;
drivers/md/dm-vdo/vdo.c
1204
struct read_only_notifier *notifier = &vdo->read_only_notifier;
drivers/md/dm-vdo/vdo.c
1206
vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/vdo.c
121
struct vdo *vdo;
drivers/md/dm-vdo/vdo.c
124
vdo = filter_vdos_locked(filter, context);
drivers/md/dm-vdo/vdo.c
1242
void vdo_enter_read_only_mode(struct vdo *vdo, int error_code)
drivers/md/dm-vdo/vdo.c
1246
struct read_only_notifier *notifier = &vdo->read_only_notifier;
drivers/md/dm-vdo/vdo.c
1250
thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
127
return vdo;
drivers/md/dm-vdo/vdo.c
1288
bool vdo_is_read_only(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1290
return vdo->threads[vdo_get_callback_thread_id()].is_read_only;
drivers/md/dm-vdo/vdo.c
1299
bool vdo_in_read_only_mode(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1301
return (vdo_get_state(vdo) == VDO_READ_ONLY_MODE);
drivers/md/dm-vdo/vdo.c
1310
bool vdo_in_recovery_mode(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1312
return (vdo_get_state(vdo) == VDO_RECOVERING);
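
A pattern worth calling out in the vdo.c hits above: vdo_is_read_only() returns a flag cached in the calling thread's vdo_thread (vdo.c:1290), while vdo_in_read_only_mode() reads the shared atomic state (vdo.c:1301), so per-I/O checks avoid an atomic load. A minimal standalone sketch of that split follows; every name below (dev, thread_ctx, ...) is hypothetical and only models the idea, not dm-vdo's actual types.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { DEV_NORMAL, DEV_READ_ONLY };

struct thread_ctx {
        bool is_read_only;      /* per-thread cache, like vdo_thread.is_read_only */
};

struct dev {
        atomic_int state;       /* authoritative state, like vdo->state */
        size_t thread_count;
        struct thread_ctx threads[4];
};

/* Hot-path query: no atomic access (cf. vdo_is_read_only()). */
static bool dev_is_read_only(const struct dev *d, size_t tid)
{
        return d->threads[tid].is_read_only;
}

/* Authoritative query (cf. vdo_in_read_only_mode()). */
static bool dev_in_read_only_mode(struct dev *d)
{
        return atomic_load(&d->state) == DEV_READ_ONLY;
}

/*
 * Publish the transition. dm-vdo fans this out with a completion that
 * visits each thread so the owner updates its own flag; a plain loop
 * stands in for that machinery here.
 */
static void dev_enter_read_only(struct dev *d)
{
        atomic_store(&d->state, DEV_READ_ONLY);
        for (size_t i = 0; i < d->thread_count; i++)
                d->threads[i].is_read_only = true;
}

int main(void)
{
        struct dev d = { .thread_count = 4 };

        dev_enter_read_only(&d);
        printf("cached=%d global=%d\n",
               dev_is_read_only(&d, 0), dev_in_read_only_mode(&d));
        return 0;
}
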
drivers/md/dm-vdo/vdo.c
1319
void vdo_enter_recovery_mode(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1321
vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/vdo.c
1323
if (vdo_in_read_only_mode(vdo))
drivers/md/dm-vdo/vdo.c
1327
vdo_set_state(vdo, VDO_RECOVERING);
drivers/md/dm-vdo/vdo.c
1348
static int perform_synchronous_action(struct vdo *vdo, vdo_action_fn action,
drivers/md/dm-vdo/vdo.c
135
&thread->vdo->allocations_allowed);
drivers/md/dm-vdo/vdo.c
1353
vdo_initialize_completion(&sync.vdo_completion, vdo, VDO_SYNC_COMPLETION);
drivers/md/dm-vdo/vdo.c
1367
struct vdo *vdo = completion->vdo;
drivers/md/dm-vdo/vdo.c
1369
bool was_enabled = vdo_get_compressing(vdo);
drivers/md/dm-vdo/vdo.c
1372
WRITE_ONCE(vdo->compressing, *enable);
drivers/md/dm-vdo/vdo.c
1375
vdo_flush_packer(vdo->packer);
drivers/md/dm-vdo/vdo.c
1391
bool vdo_set_compressing(struct vdo *vdo, bool enable)
drivers/md/dm-vdo/vdo.c
1393
perform_synchronous_action(vdo, set_compression_callback,
drivers/md/dm-vdo/vdo.c
1394
vdo->thread_config.packer_thread,
drivers/md/dm-vdo/vdo.c
1405
bool vdo_get_compressing(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1407
return READ_ONCE(vdo->compressing);
drivers/md/dm-vdo/vdo.c
1410
static size_t get_block_map_cache_size(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1412
return ((size_t) vdo->device_config->cache_size) * VDO_BLOCK_SIZE;
drivers/md/dm-vdo/vdo.c
1415
static struct error_statistics __must_check get_vdo_error_statistics(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1422
const struct atomic_statistics *atoms = &vdo->stats;
drivers/md/dm-vdo/vdo.c
1460
static block_count_t __must_check vdo_get_physical_blocks_allocated(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1462
return (vdo_get_slab_depot_allocated_blocks(vdo->depot) -
drivers/md/dm-vdo/vdo.c
1463
vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal));
drivers/md/dm-vdo/vdo.c
1472
static block_count_t __must_check vdo_get_physical_blocks_overhead(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1479
return (vdo->states.vdo.config.physical_blocks -
drivers/md/dm-vdo/vdo.c
1480
vdo_get_slab_depot_data_blocks(vdo->depot) +
drivers/md/dm-vdo/vdo.c
1481
vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal));
drivers/md/dm-vdo/vdo.c
1504
static void get_vdo_statistics(const struct vdo *vdo, struct vdo_statistics *stats)
drivers/md/dm-vdo/vdo.c
1506
struct recovery_journal *journal = vdo->recovery_journal;
drivers/md/dm-vdo/vdo.c
1507
enum vdo_state state = vdo_get_state(vdo);
drivers/md/dm-vdo/vdo.c
1509
vdo_assert_on_admin_thread(vdo, __func__);
drivers/md/dm-vdo/vdo.c
1519
stats->logical_blocks = vdo->states.vdo.config.logical_blocks;
drivers/md/dm-vdo/vdo.c
1525
stats->physical_blocks = vdo->states.vdo.config.physical_blocks;
drivers/md/dm-vdo/vdo.c
1527
stats->complete_recoveries = vdo->states.vdo.complete_recoveries;
drivers/md/dm-vdo/vdo.c
1528
stats->read_only_recoveries = vdo->states.vdo.read_only_recoveries;
drivers/md/dm-vdo/vdo.c
1529
stats->block_map_cache_size = get_block_map_cache_size(vdo);
drivers/md/dm-vdo/vdo.c
1532
stats->data_blocks_used = vdo_get_physical_blocks_allocated(vdo);
drivers/md/dm-vdo/vdo.c
1533
stats->overhead_blocks_used = vdo_get_physical_blocks_overhead(vdo);
drivers/md/dm-vdo/vdo.c
1535
vdo_get_slab_depot_statistics(vdo->depot, stats);
drivers/md/dm-vdo/vdo.c
1537
stats->packer = vdo_get_packer_statistics(vdo->packer);
drivers/md/dm-vdo/vdo.c
1538
stats->block_map = vdo_get_block_map_statistics(vdo->block_map);
drivers/md/dm-vdo/vdo.c
1539
vdo_get_dedupe_statistics(vdo->hash_zones, stats);
drivers/md/dm-vdo/vdo.c
1540
stats->errors = get_vdo_error_statistics(vdo);
drivers/md/dm-vdo/vdo.c
1544
stats->instance = vdo->instance;
drivers/md/dm-vdo/vdo.c
1545
stats->current_vios_in_progress = get_data_vio_pool_active_requests(vdo->data_vio_pool);
drivers/md/dm-vdo/vdo.c
1546
stats->max_vios = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
drivers/md/dm-vdo/vdo.c
1548
stats->flush_out = atomic64_read(&vdo->stats.flush_out);
drivers/md/dm-vdo/vdo.c
1549
stats->logical_block_size = vdo->device_config->logical_block_size;
drivers/md/dm-vdo/vdo.c
1550
copy_bio_stat(&stats->bios_in, &vdo->stats.bios_in);
drivers/md/dm-vdo/vdo.c
1551
copy_bio_stat(&stats->bios_in_partial, &vdo->stats.bios_in_partial);
drivers/md/dm-vdo/vdo.c
1552
copy_bio_stat(&stats->bios_out, &vdo->stats.bios_out);
drivers/md/dm-vdo/vdo.c
1553
copy_bio_stat(&stats->bios_meta, &vdo->stats.bios_meta);
drivers/md/dm-vdo/vdo.c
1554
copy_bio_stat(&stats->bios_journal, &vdo->stats.bios_journal);
drivers/md/dm-vdo/vdo.c
1555
copy_bio_stat(&stats->bios_page_cache, &vdo->stats.bios_page_cache);
drivers/md/dm-vdo/vdo.c
1556
copy_bio_stat(&stats->bios_out_completed, &vdo->stats.bios_out_completed);
drivers/md/dm-vdo/vdo.c
1557
copy_bio_stat(&stats->bios_meta_completed, &vdo->stats.bios_meta_completed);
drivers/md/dm-vdo/vdo.c
1559
&vdo->stats.bios_journal_completed);
drivers/md/dm-vdo/vdo.c
1561
&vdo->stats.bios_page_cache_completed);
drivers/md/dm-vdo/vdo.c
1562
copy_bio_stat(&stats->bios_acknowledged, &vdo->stats.bios_acknowledged);
drivers/md/dm-vdo/vdo.c
1563
copy_bio_stat(&stats->bios_acknowledged_partial, &vdo->stats.bios_acknowledged_partial);
drivers/md/dm-vdo/vdo.c
1579
get_vdo_statistics(completion->vdo, completion->parent);
drivers/md/dm-vdo/vdo.c
1588
void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats)
drivers/md/dm-vdo/vdo.c
1590
perform_synchronous_action(vdo, vdo_fetch_statistics_callback,
drivers/md/dm-vdo/vdo.c
1591
vdo->thread_config.admin_thread, stats);
drivers/md/dm-vdo/vdo.c
1613
BUG_ON(thread_id >= thread->vdo->thread_config.thread_count);
drivers/md/dm-vdo/vdo.c
1614
BUG_ON(thread != &thread->vdo->threads[thread_id]);
drivers/md/dm-vdo/vdo.c
1624
void vdo_dump_status(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
1628
vdo_dump_flusher(vdo->flusher);
drivers/md/dm-vdo/vdo.c
1629
vdo_dump_recovery_journal_statistics(vdo->recovery_journal);
drivers/md/dm-vdo/vdo.c
1630
vdo_dump_packer(vdo->packer);
drivers/md/dm-vdo/vdo.c
1631
vdo_dump_slab_depot(vdo->depot);
drivers/md/dm-vdo/vdo.c
1633
for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++)
drivers/md/dm-vdo/vdo.c
1634
vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]);
drivers/md/dm-vdo/vdo.c
1636
for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++)
drivers/md/dm-vdo/vdo.c
1637
vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]);
drivers/md/dm-vdo/vdo.c
1639
vdo_dump_hash_zones(vdo->hash_zones);
drivers/md/dm-vdo/vdo.c
1647
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
drivers/md/dm-vdo/vdo.c
1649
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
drivers/md/dm-vdo/vdo.c
1660
void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
drivers/md/dm-vdo/vdo.c
1664
vdo->thread_config.logical_threads[logical_zone]),
drivers/md/dm-vdo/vdo.c
1675
void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
drivers/md/dm-vdo/vdo.c
1679
vdo->thread_config.physical_threads[physical_zone]),
drivers/md/dm-vdo/vdo.c
1698
int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
drivers/md/dm-vdo/vdo.c
1713
if (!vdo_is_physical_data_block(vdo->depot, pbn))
drivers/md/dm-vdo/vdo.c
1717
slab = vdo_get_slab(vdo->depot, pbn);
drivers/md/dm-vdo/vdo.c
1722
*zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number];
drivers/md/dm-vdo/vdo.c
266
static int __must_check read_geometry_block(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
276
result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL,
drivers/md/dm-vdo/vdo.c
296
bio_set_dev(vio->bio, vdo_get_backing_device(vdo));
drivers/md/dm-vdo/vdo.c
306
result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry);
drivers/md/dm-vdo/vdo.c
410
int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
drivers/md/dm-vdo/vdo.c
414
struct vdo_thread *thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
426
thread->vdo = vdo;
drivers/md/dm-vdo/vdo.c
428
get_thread_name(&vdo->thread_config, thread_id, queue_name, sizeof(queue_name));
drivers/md/dm-vdo/vdo.c
429
return vdo_make_work_queue(vdo->thread_name_prefix, queue_name, thread,
drivers/md/dm-vdo/vdo.c
439
static int register_vdo(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
444
result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
drivers/md/dm-vdo/vdo.c
447
INIT_LIST_HEAD(&vdo->registration);
drivers/md/dm-vdo/vdo.c
448
list_add_tail(&vdo->registration, &registry.links);
drivers/md/dm-vdo/vdo.c
463
static int initialize_vdo(struct vdo *vdo, struct device_config *config,
drivers/md/dm-vdo/vdo.c
469
vdo->device_config = config;
drivers/md/dm-vdo/vdo.c
470
vdo->starting_sector_offset = config->owning_target->begin;
drivers/md/dm-vdo/vdo.c
471
vdo->instance = instance;
drivers/md/dm-vdo/vdo.c
472
vdo->allocations_allowed = true;
drivers/md/dm-vdo/vdo.c
473
vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_NEW);
drivers/md/dm-vdo/vdo.c
474
INIT_LIST_HEAD(&vdo->device_config_list);
drivers/md/dm-vdo/vdo.c
475
vdo_initialize_completion(&vdo->admin.completion, vdo, VDO_ADMIN_COMPLETION);
drivers/md/dm-vdo/vdo.c
476
init_completion(&vdo->admin.callback_sync);
drivers/md/dm-vdo/vdo.c
477
mutex_init(&vdo->stats_mutex);
drivers/md/dm-vdo/vdo.c
478
result = read_geometry_block(vdo);
drivers/md/dm-vdo/vdo.c
484
result = initialize_thread_config(config->thread_counts, &vdo->thread_config);
drivers/md/dm-vdo/vdo.c
493
config->thread_counts.hash_zones, vdo->thread_config.thread_count);
drivers/md/dm-vdo/vdo.c
497
&vdo->compression_context);
drivers/md/dm-vdo/vdo.c
505
&vdo->compression_context[i]);
drivers/md/dm-vdo/vdo.c
512
result = register_vdo(vdo);
drivers/md/dm-vdo/vdo.c
518
vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_INITIALIZED);
drivers/md/dm-vdo/vdo.c
532
struct vdo **vdo_ptr)
drivers/md/dm-vdo/vdo.c
535
struct vdo *vdo;
drivers/md/dm-vdo/vdo.c
540
result = vdo_allocate(1, struct vdo, __func__, &vdo);
drivers/md/dm-vdo/vdo.c
546
result = initialize_vdo(vdo, config, instance, reason);
drivers/md/dm-vdo/vdo.c
548
vdo_destroy(vdo);
drivers/md/dm-vdo/vdo.c
553
*vdo_ptr = vdo;
drivers/md/dm-vdo/vdo.c
555
snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
drivers/md/dm-vdo/vdo.c
557
result = vdo_allocate(vdo->thread_config.thread_count,
drivers/md/dm-vdo/vdo.c
558
struct vdo_thread, __func__, &vdo->threads);
drivers/md/dm-vdo/vdo.c
564
result = vdo_make_thread(vdo, vdo->thread_config.admin_thread,
drivers/md/dm-vdo/vdo.c
571
result = vdo_make_flusher(vdo);
drivers/md/dm-vdo/vdo.c
577
result = vdo_make_packer(vdo, DEFAULT_PACKER_BINS, &vdo->packer);
drivers/md/dm-vdo/vdo.c
583
BUG_ON(vdo->device_config->logical_block_size <= 0);
drivers/md/dm-vdo/vdo.c
584
BUG_ON(vdo->device_config->owned_device == NULL);
drivers/md/dm-vdo/vdo.c
585
result = make_data_vio_pool(vdo, MAXIMUM_VDO_USER_VIOS,
drivers/md/dm-vdo/vdo.c
587
&vdo->data_vio_pool);
drivers/md/dm-vdo/vdo.c
595
get_data_vio_pool_request_limit(vdo->data_vio_pool),
drivers/md/dm-vdo/vdo.c
596
vdo, &vdo->io_submitter);
drivers/md/dm-vdo/vdo.c
602
if (vdo_uses_bio_ack_queue(vdo)) {
drivers/md/dm-vdo/vdo.c
603
result = vdo_make_thread(vdo, vdo->thread_config.bio_ack_thread,
drivers/md/dm-vdo/vdo.c
612
result = vdo_make_thread(vdo, vdo->thread_config.cpu_thread, &cpu_q_type,
drivers/md/dm-vdo/vdo.c
614
(void **) vdo->compression_context);
drivers/md/dm-vdo/vdo.c
623
static void finish_vdo(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
627
if (vdo->threads == NULL)
drivers/md/dm-vdo/vdo.c
630
vdo_cleanup_io_submitter(vdo->io_submitter);
drivers/md/dm-vdo/vdo.c
631
vdo_finish_dedupe_index(vdo->hash_zones);
drivers/md/dm-vdo/vdo.c
633
for (i = 0; i < vdo->thread_config.thread_count; i++)
drivers/md/dm-vdo/vdo.c
634
vdo_finish_work_queue(vdo->threads[i].queue);
drivers/md/dm-vdo/vdo.c
661
static void unregister_vdo(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
664
if (filter_vdos_locked(vdo_is_equal, vdo) == vdo)
drivers/md/dm-vdo/vdo.c
665
list_del_init(&vdo->registration);
drivers/md/dm-vdo/vdo.c
674
void vdo_destroy(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
678
if (vdo == NULL)
drivers/md/dm-vdo/vdo.c
682
BUG_ON(vdo_get_admin_state(vdo)->normal);
drivers/md/dm-vdo/vdo.c
684
vdo->allocations_allowed = true;
drivers/md/dm-vdo/vdo.c
686
finish_vdo(vdo);
drivers/md/dm-vdo/vdo.c
687
unregister_vdo(vdo);
drivers/md/dm-vdo/vdo.c
688
free_data_vio_pool(vdo->data_vio_pool);
drivers/md/dm-vdo/vdo.c
689
vdo_free_io_submitter(vdo_forget(vdo->io_submitter));
drivers/md/dm-vdo/vdo.c
690
vdo_free_flusher(vdo_forget(vdo->flusher));
drivers/md/dm-vdo/vdo.c
691
vdo_free_packer(vdo_forget(vdo->packer));
drivers/md/dm-vdo/vdo.c
692
vdo_free_recovery_journal(vdo_forget(vdo->recovery_journal));
drivers/md/dm-vdo/vdo.c
693
vdo_free_slab_depot(vdo_forget(vdo->depot));
drivers/md/dm-vdo/vdo.c
694
vdo_uninitialize_layout(&vdo->layout);
drivers/md/dm-vdo/vdo.c
695
vdo_uninitialize_layout(&vdo->next_layout);
drivers/md/dm-vdo/vdo.c
696
if (vdo->partition_copier)
drivers/md/dm-vdo/vdo.c
697
dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
drivers/md/dm-vdo/vdo.c
698
uninitialize_super_block(&vdo->super_block);
drivers/md/dm-vdo/vdo.c
699
vdo_free_block_map(vdo_forget(vdo->block_map));
drivers/md/dm-vdo/vdo.c
700
vdo_free_hash_zones(vdo_forget(vdo->hash_zones));
drivers/md/dm-vdo/vdo.c
701
vdo_free_physical_zones(vdo_forget(vdo->physical_zones));
drivers/md/dm-vdo/vdo.c
702
vdo_free_logical_zones(vdo_forget(vdo->logical_zones));
drivers/md/dm-vdo/vdo.c
704
if (vdo->threads != NULL) {
drivers/md/dm-vdo/vdo.c
705
for (i = 0; i < vdo->thread_config.thread_count; i++) {
drivers/md/dm-vdo/vdo.c
706
free_listeners(&vdo->threads[i]);
drivers/md/dm-vdo/vdo.c
707
vdo_free_work_queue(vdo_forget(vdo->threads[i].queue));
drivers/md/dm-vdo/vdo.c
709
vdo_free(vdo_forget(vdo->threads));
drivers/md/dm-vdo/vdo.c
712
uninitialize_thread_config(&vdo->thread_config);
drivers/md/dm-vdo/vdo.c
714
if (vdo->compression_context != NULL) {
drivers/md/dm-vdo/vdo.c
715
for (i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++)
drivers/md/dm-vdo/vdo.c
716
vdo_free(vdo_forget(vdo->compression_context[i]));
drivers/md/dm-vdo/vdo.c
718
vdo_free(vdo_forget(vdo->compression_context));
drivers/md/dm-vdo/vdo.c
720
vdo_free(vdo);
drivers/md/dm-vdo/vdo.c
723
static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block)
drivers/md/dm-vdo/vdo.c
728
(char **) &vdo->super_block.buffer);
drivers/md/dm-vdo/vdo.c
732
return allocate_vio_components(vdo, VIO_TYPE_SUPER_BLOCK,
drivers/md/dm-vdo/vdo.c
735
&vdo->super_block.vio);
drivers/md/dm-vdo/vdo.c
779
void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent)
drivers/md/dm-vdo/vdo.c
783
result = initialize_super_block(vdo, &vdo->super_block);
drivers/md/dm-vdo/vdo.c
789
vdo->super_block.vio.completion.parent = parent;
drivers/md/dm-vdo/vdo.c
790
vdo_submit_metadata_vio(&vdo->super_block.vio,
drivers/md/dm-vdo/vdo.c
791
vdo_get_data_region_start(vdo->geometry),
drivers/md/dm-vdo/vdo.c
803
struct block_device *vdo_get_backing_device(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
805
return vdo->device_config->owned_device->bdev;
drivers/md/dm-vdo/vdo.c
825
int vdo_synchronous_flush(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
830
bio_init(&bio, vdo_get_backing_device(vdo), NULL, 0,
drivers/md/dm-vdo/vdo.c
835
atomic64_inc(&vdo->stats.flush_out);
drivers/md/dm-vdo/vdo.c
853
enum vdo_state vdo_get_state(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
855
enum vdo_state state = atomic_read(&vdo->state);
drivers/md/dm-vdo/vdo.c
869
void vdo_set_state(struct vdo *vdo, enum vdo_state state)
drivers/md/dm-vdo/vdo.c
87
static bool vdo_is_equal(struct vdo *vdo, const void *context)
drivers/md/dm-vdo/vdo.c
873
atomic_set(&vdo->state, state);
drivers/md/dm-vdo/vdo.c
882
const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
884
return vdo_get_admin_state_code(&vdo->admin.state);
drivers/md/dm-vdo/vdo.c
89
return (vdo == context);
drivers/md/dm-vdo/vdo.c
891
static void record_vdo(struct vdo *vdo)
drivers/md/dm-vdo/vdo.c
894
vdo->states.unused = vdo->geometry.unused;
drivers/md/dm-vdo/vdo.c
895
vdo->states.vdo.state = vdo_get_state(vdo);
drivers/md/dm-vdo/vdo.c
896
vdo->states.block_map = vdo_record_block_map(vdo->block_map);
drivers/md/dm-vdo/vdo.c
897
vdo->states.recovery_journal = vdo_record_recovery_journal(vdo->recovery_journal);
drivers/md/dm-vdo/vdo.c
898
vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot);
drivers/md/dm-vdo/vdo.c
899
vdo->states.layout = vdo->layout;
drivers/md/dm-vdo/vdo.c
952
void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent)
drivers/md/dm-vdo/vdo.c
954
struct vdo_super_block *super_block = &vdo->super_block;
drivers/md/dm-vdo/vdo.c
966
record_vdo(vdo);
drivers/md/dm-vdo/vdo.c
968
vdo_encode_super_block(super_block->buffer, &vdo->states);
drivers/md/dm-vdo/vdo.c
972
vdo_get_data_region_start(vdo->geometry),
drivers/md/dm-vdo/vdo.c
987
int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
drivers/md/dm-vdo/vdo.c
991
struct vdo_thread *thread = &vdo->threads[thread_id];
drivers/md/dm-vdo/vdo.c
995
result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread,
drivers/md/dm-vdo/vdo.h
275
static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)
drivers/md/dm-vdo/vdo.h
277
return vdo->device_config->thread_counts.bio_ack_threads > 0;
drivers/md/dm-vdo/vdo.h
287
typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);
drivers/md/dm-vdo/vdo.h
290
struct vdo * __must_check vdo_find_matching(vdo_filter_fn filter, const void *context);
drivers/md/dm-vdo/vdo.h
292
int __must_check vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
drivers/md/dm-vdo/vdo.h
296
static inline int __must_check vdo_make_default_thread(struct vdo *vdo,
drivers/md/dm-vdo/vdo.h
299
return vdo_make_thread(vdo, thread_id, NULL, 1, NULL);
drivers/md/dm-vdo/vdo.h
303
char **reason, struct vdo **vdo_ptr);
drivers/md/dm-vdo/vdo.h
305
void vdo_destroy(struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
307
void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);
drivers/md/dm-vdo/vdo.h
309
struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
313
int __must_check vdo_synchronous_flush(struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
315
const struct admin_state_code * __must_check vdo_get_admin_state(const struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
317
bool vdo_set_compressing(struct vdo *vdo, bool enable);
drivers/md/dm-vdo/vdo.h
319
bool vdo_get_compressing(struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
321
void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats);
drivers/md/dm-vdo/vdo.h
325
enum vdo_state __must_check vdo_get_state(const struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
327
void vdo_set_state(struct vdo *vdo, enum vdo_state state);
drivers/md/dm-vdo/vdo.h
329
void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent);
drivers/md/dm-vdo/vdo.h
331
int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
drivers/md/dm-vdo/vdo.h
335
int vdo_enable_read_only_entry(struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
341
void vdo_enter_read_only_mode(struct vdo *vdo, int error_code);
drivers/md/dm-vdo/vdo.h
343
bool __must_check vdo_is_read_only(struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
345
bool __must_check vdo_in_read_only_mode(const struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
347
bool __must_check vdo_in_recovery_mode(const struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
349
void vdo_enter_recovery_mode(struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
351
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name);
drivers/md/dm-vdo/vdo.h
353
void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
drivers/md/dm-vdo/vdo.h
356
void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, zone_count_t physical_zone,
drivers/md/dm-vdo/vdo.h
359
int __must_check vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
drivers/md/dm-vdo/vdo.h
362
void vdo_dump_status(const struct vdo *vdo);
drivers/md/dm-vdo/vdo.h
57
struct vdo *vdo;
drivers/md/dm-vdo/vio.c
100
initialize_vio(vio, bio, block_count, vio_type, priority, vdo);
drivers/md/dm-vdo/vio.c
118
int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
drivers/md/dm-vdo/vio.c
138
result = allocate_vio_components(vdo, vio_type, priority, parent, block_count,
drivers/md/dm-vdo/vio.c
176
struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/vio.c
177
struct device_config *config = vdo->device_config;
drivers/md/dm-vdo/vio.c
179
pbn -= vdo->geometry.bio_offset;
drivers/md/dm-vdo/vio.c
261
struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/vio.c
265
atomic64_inc(&vdo->stats.read_only_error_count);
drivers/md/dm-vdo/vio.c
269
atomic64_inc(&vdo->stats.no_space_error_count);
drivers/md/dm-vdo/vio.c
321
int make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count, thread_id_t thread_id,
drivers/md/dm-vdo/vio.c
350
result = allocate_vio_components(vdo, vio_type, priority, NULL, block_count, ptr,
drivers/md/dm-vdo/vio.c
44
struct vdo *vdo = vio->completion.vdo;
drivers/md/dm-vdo/vio.c
47
return ((pbn == VDO_GEOMETRY_BLOCK_LOCATION) ? pbn : pbn + vdo->geometry.bio_offset);
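
The two vio.c hits at lines 47 and 179 are inverses of each other: geometry.bio_offset is added when a VDO-internal pbn is turned into a device block for bio submission and removed on the way back, with VDO_GEOMETRY_BLOCK_LOCATION passed through untranslated. A standalone sketch of that translation, with hypothetical names throughout and the sentinel value assumed:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pbn_t;

/* Stand-in for VDO_GEOMETRY_BLOCK_LOCATION; modeled here as a sentinel
 * that is never shifted by bio_offset (matching vio.c:47 above). */
#define GEOMETRY_BLOCK ((pbn_t)0)

static pbn_t pbn_to_device_block(pbn_t pbn, pbn_t bio_offset)
{
        return (pbn == GEOMETRY_BLOCK) ? pbn : pbn + bio_offset;
}

static pbn_t device_block_to_pbn(pbn_t block, pbn_t bio_offset)
{
        return block - bio_offset;
}

int main(void)
{
        pbn_t offset = 1024, pbn = 5;
        pbn_t dev_block = pbn_to_device_block(pbn, offset);

        printf("pbn %llu -> device block %llu -> pbn %llu\n",
               (unsigned long long)pbn,
               (unsigned long long)dev_block,
               (unsigned long long)device_block_to_pbn(dev_block, offset));
        return 0;
}
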
drivers/md/dm-vdo/vio.c
494
struct atomic_statistics *stats = &vio->completion.vdo->stats;
drivers/md/dm-vdo/vio.c
512
atomic64_inc(&vio->completion.vdo->stats.bios_completed);
drivers/md/dm-vdo/vio.c
78
int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
drivers/md/dm-vdo/vio.h
111
enum vio_priority priority, struct vdo *vdo)
drivers/md/dm-vdo/vio.h
120
vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
drivers/md/dm-vdo/vio.h
197
int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count,
drivers/md/dm-vdo/vio.h
58
return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
drivers/md/dm-vdo/vio.h
80
int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
drivers/md/dm-vdo/vio.h
83
int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
drivers/md/dm-vdo/vio.h
88
static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
drivers/md/dm-vdo/vio.h
93
return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
drivers/platform/chrome/cros_ec_typec.c
407
desc.vdo = DP_PORT_VDO;
drivers/platform/chrome/cros_ec_typec.c
561
return node->amode->vdo;
drivers/platform/chrome/cros_ec_typec.c
688
dp_data.conf |= VDO_TYPEC_CABLE_SPEED(port->c_identity.vdo[0]) <<
drivers/platform/chrome/cros_ec_typec.c
902
desc.vdo = sop_disc->svids[i].mode_vdo[j];
drivers/platform/chrome/cros_ec_typec.c
969
id->vdo[i - 3] = disc->discovery_vdo[i];
drivers/platform/chrome/cros_ec_typec.c
997
cable_plug_type = VDO_TYPEC_CABLE_TYPE(port->c_identity.vdo[0]);
drivers/platform/chrome/cros_typec_altmode.c
58
static int cros_typec_altmode_enter(struct typec_altmode *alt, u32 *vdo)
drivers/platform/chrome/cros_typec_vdm.c
121
const u32 *vdo, int cnt)
drivers/platform/chrome/cros_typec_vdm.c
134
vdm_req.vdm_data[i] = vdo[i-1];
drivers/platform/chrome/cros_typec_vdm.c
95
static int cros_typec_port_amode_enter(struct typec_altmode *amode, u32 *vdo)
drivers/usb/typec/altmodes/displayport.c
106
signal = DP_CAP_DP_SIGNALLING(dp->port->vdo) & DP_CAP_DP_SIGNALLING(dp->alt->vdo);
drivers/usb/typec/altmodes/displayport.c
108
signal &= DP_CAP_DP_SIGNALLING(dp->plug_prime->vdo);
drivers/usb/typec/altmodes/displayport.c
117
pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
drivers/usb/typec/altmodes/displayport.c
118
DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
drivers/usb/typec/altmodes/displayport.c
121
pin_assign &= DP_CAP_DFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
drivers/usb/typec/altmodes/displayport.c
126
pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
drivers/usb/typec/altmodes/displayport.c
127
DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
drivers/usb/typec/altmodes/displayport.c
130
pin_assign &= DP_CAP_UFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
drivers/usb/typec/altmodes/displayport.c
270
u32 vdo;
drivers/usb/typec/altmodes/displayport.c
299
vdo = 1;
drivers/usb/typec/altmodes/displayport.c
300
ret = typec_altmode_vdm(dp->alt, header, &vdo, 2);
drivers/usb/typec/altmodes/displayport.c
346
static void dp_altmode_attention(struct typec_altmode *alt, const u32 vdo)
drivers/usb/typec/altmodes/displayport.c
354
dp->data.status = vdo;
drivers/usb/typec/altmodes/displayport.c
373
const u32 hdr, const u32 *vdo, int count)
drivers/usb/typec/altmodes/displayport.c
408
dp->data.status = *vdo;
drivers/usb/typec/altmodes/displayport.c
444
const u32 hdr, const u32 *vdo, int count)
drivers/usb/typec/altmodes/displayport.c
560
cap = DP_CAP_CAPABILITY(dp->alt->vdo);
drivers/usb/typec/altmodes/displayport.c
596
cap = DP_CAP_CAPABILITY(dp->alt->vdo);
drivers/usb/typec/altmodes/displayport.c
632
return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
drivers/usb/typec/altmodes/displayport.c
634
return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
drivers/usb/typec/altmodes/displayport.c
771
if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
drivers/usb/typec/altmodes/displayport.c
772
DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
drivers/usb/typec/altmodes/displayport.c
773
!(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
drivers/usb/typec/altmodes/displayport.c
774
DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) {
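
The displayport.c hits above all compute pin assignments the same way: AND the local port's DP capability VDO field against the partner's, then against the cable plug's when one is registered. A standalone sketch of that intersection; the field offsets are recalled from include/linux/usb/typec_dp.h and should be treated as assumptions, as are the sample VDO values.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the DP capability VDO (per typec_dp.h, from memory):
 * bits 23:16 = UFP_D pin assignments, bits 15:8 = DFP_D pin assignments. */
#define DP_CAP_UFP_D_PIN_ASSIGN(cap)    (((cap) >> 16) & 0xff)
#define DP_CAP_DFP_D_PIN_ASSIGN(cap)    (((cap) >> 8) & 0xff)

int main(void)
{
        uint32_t port_vdo = 0x00001c04; /* hypothetical local DFP_D caps */
        uint32_t alt_vdo = 0x000c0004;  /* hypothetical partner UFP_D caps */
        uint32_t plug_vdo = 0x000c1c04; /* hypothetical cable plug caps */

        /* Local DFP_D driving a partner UFP_D (cf. displayport.c:117-118). */
        uint8_t pins = DP_CAP_UFP_D_PIN_ASSIGN(alt_vdo) &
                       DP_CAP_DFP_D_PIN_ASSIGN(port_vdo);

        /* Narrow further by what the cable plug advertises (cf. line 130). */
        pins &= DP_CAP_UFP_D_PIN_ASSIGN(plug_vdo);

        printf("usable pin assignments: 0x%02x\n", pins);
        return 0;
}
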
drivers/usb/typec/altmodes/thunderbolt.c
141
const u32 *vdo, int count)
drivers/usb/typec/altmodes/thunderbolt.c
190
const u32 hdr, const u32 *vdo, int count)
drivers/usb/typec/altmodes/thunderbolt.c
211
data.device_mode = tbt->alt->vdo;
drivers/usb/typec/altmodes/thunderbolt.c
214
data.cable_mode = tbt->plug[TYPEC_PLUG_SOP_P]->vdo;
drivers/usb/typec/altmodes/thunderbolt.c
319
u32 vdo;
drivers/usb/typec/altmodes/thunderbolt.c
347
vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1);
drivers/usb/typec/altmodes/thunderbolt.c
348
vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0;
drivers/usb/typec/altmodes/thunderbolt.c
349
vdo |= TBT_MODE;
drivers/usb/typec/altmodes/thunderbolt.c
354
vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
drivers/usb/typec/altmodes/thunderbolt.c
356
vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo));
drivers/usb/typec/altmodes/thunderbolt.c
357
vdo |= plug->vdo & TBT_CABLE_ROUNDED;
drivers/usb/typec/altmodes/thunderbolt.c
358
vdo |= plug->vdo & TBT_CABLE_OPTICAL;
drivers/usb/typec/altmodes/thunderbolt.c
359
vdo |= plug->vdo & TBT_CABLE_RETIMER;
drivers/usb/typec/altmodes/thunderbolt.c
360
vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING;
drivers/usb/typec/altmodes/thunderbolt.c
362
vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE);
drivers/usb/typec/altmodes/thunderbolt.c
365
tbt->enter_vdo = vdo;
drivers/usb/typec/anx7411.c
552
int svid, int vdo)
drivers/usb/typec/anx7411.c
560
desc.vdo = vdo;
drivers/usb/typec/bus.c
126
int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)
drivers/usb/typec/bus.c
147
return pdev->ops->enter(pdev, vdo);
drivers/usb/typec/bus.c
186
int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
drivers/usb/typec/bus.c
197
pdev->ops->attention(pdev, vdo);
drivers/usb/typec/bus.c
215
const u32 header, const u32 *vdo, int count)
drivers/usb/typec/bus.c
233
return pdev->ops->vdm(pdev, header, vdo, count);
drivers/usb/typec/bus.c
259
int typec_cable_altmode_enter(struct typec_altmode *adev, enum typec_plug_index sop, u32 *vdo)
drivers/usb/typec/bus.c
278
return pdev->cable_ops->enter(pdev, sop, vdo);
drivers/usb/typec/bus.c
322
const u32 header, const u32 *vdo, int count)
drivers/usb/typec/bus.c
345
return pdev->cable_ops->vdm(pdev, sop, header, vdo, count);
drivers/usb/typec/class.c
133
return sysfs_emit(buf, "0x%08x\n", id->vdo[0]);
drivers/usb/typec/class.c
142
return sysfs_emit(buf, "0x%08x\n", id->vdo[1]);
drivers/usb/typec/class.c
151
return sysfs_emit(buf, "0x%08x\n", id->vdo[2]);
drivers/usb/typec/class.c
2619
u32 vdo;
drivers/usb/typec/class.c
2636
ret = fwnode_property_read_u32(child, "vdo", &vdo);
drivers/usb/typec/class.c
2650
desc.vdo = vdo;
drivers/usb/typec/class.c
347
return sprintf(buf, "0x%08x\n", alt->vdo);
drivers/usb/typec/class.c
349
static DEVICE_ATTR_RO(vdo);
drivers/usb/typec/class.c
657
alt->adev.vdo = desc->vdo;
drivers/usb/typec/class.c
933
u32 devcap = PD_VDO_UFP_DEVCAP(id->vdo[0]);
drivers/usb/typec/class.c
942
usb_capability = PD_VDO_DFP_HOSTCAP(id->vdo[0]);
drivers/usb/typec/tcpm/tcpm.c
1703
u32 vdo = p[VDO_INDEX_IDH];
drivers/usb/typec/tcpm/tcpm.c
1708
port->partner_ident.id_header = vdo;
drivers/usb/typec/tcpm/tcpm.c
1716
PD_IDH_VID(vdo),
drivers/usb/typec/tcpm/tcpm.c
1739
port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
drivers/usb/typec/tcpm/tcpm.c
1750
port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
drivers/usb/typec/tcpm/tcpm.c
1859
paltmode->vdo = p[i];
drivers/usb/typec/tcpm/tcpm.c
1863
paltmode->mode, paltmode->vdo);
drivers/usb/typec/tcpm/tcpm.c
2902
static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
drivers/usb/typec/tcpm/tcpm.c
2912
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
drivers/usb/typec/tcpm/tcpm.c
2915
return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
drivers/usb/typec/tcpm/tcpm.c
2950
u32 *vdo)
drivers/usb/typec/tcpm/tcpm.c
2960
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
drivers/usb/typec/tcpm/tcpm.c
2963
return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
drivers/usb/typec/tipd/core.c
1246
desc.vdo = DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D));
drivers/usb/typec/tipd/core.c
1247
desc.vdo |= DP_CAP_DFP_D;
drivers/usb/typec/ucsi/displayport.c
160
u32 cap = dp->alt->vdo;
drivers/usb/typec/ucsi/displayport.c
319
desc->vdo |= DP_CAP_DP_SIGNALLING(0) | DP_CAP_RECEPTACLE;
drivers/usb/typec/ucsi/displayport.c
322
desc->vdo |= all_assignments << 8;
drivers/usb/typec/ucsi/displayport.c
323
desc->vdo |= all_assignments << 16;
drivers/usb/typec/ucsi/displayport.c
48
static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
drivers/usb/typec/ucsi/thunderbolt.c
108
ret = ucsi_thunderbolt_set_altmode(tbt, true, *vdo);
drivers/usb/typec/ucsi/thunderbolt.c
47
bool enter, u32 vdo)
drivers/usb/typec/ucsi/thunderbolt.c
55
((u64)vdo << 32);
drivers/usb/typec/ucsi/thunderbolt.c
81
static int ucsi_thunderbolt_enter(struct typec_altmode *alt, u32 *vdo)
drivers/usb/typec/ucsi/trace.h
101
__entry->mode, __entry->vdo)
drivers/usb/typec/ucsi/trace.h
91
__field(u32, vdo)
drivers/usb/typec/ucsi/trace.h
97
__entry->vdo = alt->vdo;
drivers/usb/typec/ucsi/ucsi.c
419
if (desc->vdo == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
drivers/usb/typec/ucsi/ucsi.c
555
desc.vdo = updated[i].mid;
drivers/usb/typec/ucsi/ucsi.c
558
desc.vdo = orig[i].mid;
drivers/usb/typec/ucsi/ucsi.c
623
desc.vdo = alt[j].mid;
drivers/usb/typec/ucsi/ucsi.c
664
adev[i]->vdo != USB_TYPEC_NVIDIA_VLINK_DBG_VDO))
drivers/usb/typec/ucsi/ucsi.c
802
u32 vdo[7] = {};
drivers/usb/typec/ucsi/ucsi.c
805
ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP, sizeof(vdo), vdo,
drivers/usb/typec/ucsi/ucsi.c
811
con->partner_identity = *(struct usb_pd_identity *)&vdo[1];
drivers/usb/typec/ucsi/ucsi.c
822
u32 vdo[7] = {};
drivers/usb/typec/ucsi/ucsi.c
825
ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP_P, sizeof(vdo), vdo,
drivers/usb/typec/ucsi/ucsi.c
830
con->cable_identity = *(struct usb_pd_identity *)&vdo[1];
drivers/usb/typec/ucsi/ucsi_ccg.c
517
(pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
include/dt-bindings/usb/pd.h
431
#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
include/dt-bindings/usb/pd.h
432
#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
include/linux/platform_data/cros_ec_commands.h
5455
uint32_t vdo[6]; /* Mode VDOs */
include/linux/usb/pd_vdo.h
154
#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
include/linux/usb/pd_vdo.h
155
#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
include/linux/usb/pd_vdo.h
156
#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
include/linux/usb/pd_vdo.h
157
#define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7)
include/linux/usb/pd_vdo.h
158
#define PD_IDH_CONN_TYPE(vdo) (((vdo) >> 21) & 0x3)
include/linux/usb/pd_vdo.h
159
#define PD_IDH_HOST_SUPP(vdo) ((vdo) & (1 << 31))
include/linux/usb/pd_vdo.h
166
#define PD_CSTAT_XID(vdo) (vdo)
include/linux/usb/pd_vdo.h
176
#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
include/linux/usb/pd_vdo.h
192
#define PD_VDO_UFP_DEVCAP(vdo) FIELD_GET(GENMASK(27, 24), vdo)
include/linux/usb/pd_vdo.h
251
#define PD_VDO_DFP_HOSTCAP(vdo) FIELD_GET(GENMASK(26, 24), vdo)
include/linux/usb/pd_vdo.h
386
#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7)
include/linux/usb/pd_vdo.h
387
#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
include/linux/usb/pd_vdo.h
464
#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
include/linux/usb/pd_vdo.h
465
#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
include/linux/usb/pd_vdo.h
510
#define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16)
include/linux/usb/pd_vdo.h
511
#define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff)
include/linux/usb/pd_vdo.h
78
#define PD_VDO_VID(vdo) ((vdo) >> 16)
include/linux/usb/pd_vdo.h
79
#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
include/linux/usb/pd_vdo.h
80
#define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3)
include/linux/usb/pd_vdo.h
81
#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
include/linux/usb/pd_vdo.h
82
#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
include/linux/usb/pd_vdo.h
83
#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
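
The pd_vdo.h accessors are plain shift-and-mask operations on a 32-bit Structured VDM header. A standalone decode reusing the definitions exactly as listed above; the sample header value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PD_VDO_VID(vdo)         ((vdo) >> 16)
#define PD_VDO_SVDM(vdo)        (((vdo) >> 15) & 1)
#define PD_VDO_SVDM_VER(vdo)    (((vdo) >> 13) & 0x3)
#define PD_VDO_OPOS(vdo)        (((vdo) >> 8) & 0x7)
#define PD_VDO_CMDT(vdo)        (((vdo) >> 6) & 0x3)
#define PD_VDO_CMD(vdo)         ((vdo) & 0x1f)

int main(void)
{
        /* Hypothetical header: PD SID, structured, initiator Discover Identity. */
        uint32_t hdr = 0xff008001;

        printf("svid=0x%04x svdm=%u ver=%u opos=%u cmdt=%u cmd=%u\n",
               (unsigned)PD_VDO_VID(hdr), (unsigned)PD_VDO_SVDM(hdr),
               (unsigned)PD_VDO_SVDM_VER(hdr), (unsigned)PD_VDO_OPOS(hdr),
               (unsigned)PD_VDO_CMDT(hdr), (unsigned)PD_VDO_CMD(hdr));
        return 0;
}
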
include/linux/usb/typec.h
134
u32 vdo[3];
include/linux/usb/typec.h
154
u32 vdo;
include/linux/usb/typec_altmode.h
100
const u32 hdr, const u32 *vdo, int cnt);
include/linux/usb/typec_altmode.h
103
int typec_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop, u32 *vdo);
include/linux/usb/typec_altmode.h
106
const u32 header, const u32 *vdo, int count);
include/linux/usb/typec_altmode.h
37
u32 vdo;
include/linux/usb/typec_altmode.h
70
int (*enter)(struct typec_altmode *altmode, u32 *vdo);
include/linux/usb/typec_altmode.h
72
void (*attention)(struct typec_altmode *altmode, u32 vdo);
include/linux/usb/typec_altmode.h
74
const u32 *vdo, int cnt);
include/linux/usb/typec_altmode.h
80
int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
include/linux/usb/typec_altmode.h
82
int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
include/linux/usb/typec_altmode.h
84
const u32 header, const u32 *vdo, int count);
include/linux/usb/typec_altmode.h
97
int (*enter)(struct typec_altmode *altmode, enum typec_plug_index sop, u32 *vdo);
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
431
#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
432
#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)