#ifndef VDO_TYPES_H
#define VDO_TYPES_H
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/compiler_attributes.h>
#include <linux/types.h>
#include "funnel-queue.h"
/* A count or size measured in VDO blocks. */
typedef u64 block_count_t;
/* The size of a block, in bytes. */
typedef u16 block_size_t;
/* A count of data_vios. */
typedef u16 data_vio_count_t;
/* A height within a tree (presumably the block map tree — confirm). */
typedef u8 height_t;
/* A block number in the logical (user-visible) address space. */
typedef u64 logical_block_number_t;
/* A nonce identifying a particular VDO instance or metadata generation. */
typedef u64 nonce_t;
/* A count of pages. */
typedef u32 page_count_t;
/* The number of a page. */
typedef u32 page_number_t;
/* A block number in the physical (underlying storage) address space. */
typedef u64 physical_block_number_t;
/* A count of tree roots (see VDO_BLOCK_MAP_PARTITION below). */
typedef u8 root_count_t;
/* A count of sectors. */
typedef u8 sector_count_t;
/* A monotonically increasing sequence number. */
typedef u64 sequence_number_t;
/* The offset of a block within a slab. */
typedef u32 slab_block_number;
/* A count of slabs. */
typedef u16 slab_count_t;
/* The number of a slot (e.g. within a block map page — see block_map_slot). */
typedef u16 slot_number_t;
/* A count of threads. */
typedef u8 thread_count_t;
/* The identifier of a VDO thread. */
typedef u8 thread_id_t;
/* A count of zones. */
typedef u8 zone_count_t;
/*
 * The possible states of a vdo. The explicit numbering suggests these values
 * are persisted in metadata, so existing assignments must not be renumbered
 * (NOTE(review): confirm on-disk use against the save/load code).
 */
enum vdo_state {
	VDO_DIRTY = 0,
	VDO_NEW = 1,
	VDO_CLEAN = 2,
	VDO_READ_ONLY_MODE = 3,
	VDO_FORCE_REBUILD = 4,
	VDO_RECOVERING = 5,
	VDO_REPLAYING = 6,
	VDO_REBUILD_FOR_UPGRADE = 7,
	/* Keep this last: the number of defined states. */
	VDO_STATE_COUNT
};

/**
 * vdo_state_requires_read_only_rebuild() - Check whether a vdo_state
 * requires a read-only rebuild before the volume can be brought online.
 * @state: The state to query.
 *
 * Return: true for VDO_FORCE_REBUILD and VDO_REBUILD_FOR_UPGRADE,
 *         false otherwise.
 */
static inline bool __must_check vdo_state_requires_read_only_rebuild(enum vdo_state state)
{
	switch (state) {
	case VDO_FORCE_REBUILD:
	case VDO_REBUILD_FOR_UPGRADE:
		return true;
	default:
		return false;
	}
}

/**
 * vdo_state_requires_recovery() - Check whether a vdo_state indicates that
 * the volume was not shut down cleanly and must be recovered.
 * @state: The state to query.
 *
 * Return: true for VDO_DIRTY, VDO_REPLAYING, and VDO_RECOVERING,
 *         false otherwise.
 */
static inline bool __must_check vdo_state_requires_recovery(enum vdo_state state)
{
	switch (state) {
	case VDO_DIRTY:
	case VDO_REPLAYING:
	case VDO_RECOVERING:
		return true;
	default:
		return false;
	}
}
/*
 * The two kinds of remapping entries. Packed and explicitly numbered,
 * which suggests the values appear in on-disk journal entries — do not
 * renumber (NOTE(review): confirm against the journal encoding code).
 */
enum journal_operation {
VDO_JOURNAL_DATA_REMAPPING = 0,
VDO_JOURNAL_BLOCK_MAP_REMAPPING = 1,
} __packed;
/*
 * The sub-partitions of a vdo volume. Packed and explicitly numbered,
 * which suggests the ids are persisted in the layout metadata — do not
 * renumber (NOTE(review): confirm against the layout encoding code).
 */
enum partition_id {
VDO_BLOCK_MAP_PARTITION = 0,
VDO_SLAB_DEPOT_PARTITION = 1,
VDO_RECOVERY_JOURNAL_PARTITION = 2,
VDO_SLAB_SUMMARY_PARTITION = 3,
} __packed;
/*
 * The types of vdo metadata. Packed, explicitly numbered, and starting at 1
 * (0 presumably reserved as "invalid/unset") — treat the values as part of
 * the persisted format and do not renumber (NOTE(review): confirm).
 */
enum vdo_metadata_type {
VDO_METADATA_RECOVERY_JOURNAL = 1,
VDO_METADATA_SLAB_JOURNAL = 2,
VDO_METADATA_RECOVERY_JOURNAL_2 = 3,
} __packed;
/* The location of an entry: the block holding it, and the slot within it. */
struct block_map_slot {
/* The physical block number of the page containing the entry */
physical_block_number_t pbn;
/* The slot within that page */
slot_number_t slot;
};
/*
 * The state of a block mapping. Values from VDO_MAPPING_STATE_COMPRESSED_BASE
 * through VDO_MAPPING_STATE_COMPRESSED_MAX encode which compression slot the
 * data occupies (see VDO_MAX_COMPRESSION_SLOTS below). The 0..15 range
 * suggests the state is stored in a 4-bit field — NOTE(review): confirm
 * against the block map entry encoding.
 */
enum block_mapping_state {
VDO_MAPPING_STATE_UNMAPPED = 0,
VDO_MAPPING_STATE_UNCOMPRESSED = 1,
VDO_MAPPING_STATE_COMPRESSED_BASE = 2,
VDO_MAPPING_STATE_COMPRESSED_MAX = 15,
};
/*
 * The total number of compression slots: one per mapping-state value in the
 * compressed range (inclusive on both ends, hence the +1).
 */
enum {
VDO_MAX_COMPRESSION_SLOTS =
(VDO_MAPPING_STATE_COMPRESSED_MAX - VDO_MAPPING_STATE_COMPRESSED_BASE + 1),
};
/* The location of data: a physical block number plus its mapping state. */
struct data_location {
/* The physical block holding the data */
physical_block_number_t pbn;
/* How the data is mapped there (unmapped, uncompressed, or a compression slot) */
enum block_mapping_state state;
};
/*
 * The configuration of a single slab. Packed — presumably persisted or
 * shared across an ABI boundary, so the field order and types must not
 * change (NOTE(review): confirm where this is serialized).
 */
struct slab_config {
/* The total number of blocks in the slab */
block_count_t slab_blocks;
/* The number of blocks available for data */
block_count_t data_blocks;
/* The number of blocks holding reference counts */
block_count_t reference_count_blocks;
/* The number of blocks holding the slab journal */
block_count_t slab_journal_blocks;
/* Journal length at which to start flushing (presumably — confirm in slab journal code) */
block_count_t slab_journal_flushing_threshold;
/* Journal length at which to start blocking (presumably — confirm) */
block_count_t slab_journal_blocking_threshold;
/* Journal length at which to trigger scrubbing (presumably — confirm) */
block_count_t slab_journal_scrubbing_threshold;
} __packed;
/*
 * The configured counts of the various vdo thread types and zones
 * (see struct device_config, which embeds this).
 */
struct thread_count_config {
/* Number of bio-acknowledgment threads */
unsigned int bio_ack_threads;
/* Number of bio submission threads */
unsigned int bio_threads;
/* Rotation interval for distributing bios among the bio threads
 * (presumably round-robin — confirm against the bio queue code) */
unsigned int bio_rotation_interval;
/* Number of CPU (computation) threads */
unsigned int cpu_threads;
/* Number of logical zones */
unsigned int logical_zones;
/* Number of physical zones */
unsigned int physical_zones;
/* Number of hash zones */
unsigned int hash_zones;
} __packed;
/*
 * The configuration of a vdo device, as parsed from its device-mapper
 * target line.
 */
struct device_config {
/* The dm target which owns this config */
struct dm_target *owning_target;
/* The underlying storage device */
struct dm_dev *owned_device;
/* The vdo this config describes */
struct vdo *vdo;
/* Link for keeping configs on a list (presumably per-vdo — confirm) */
struct list_head config_list;
/* The original target-line string this config was parsed from */
char *original_string;
/* The version of the target-line format */
unsigned int version;
/* The name of the parent (storage) device */
char *parent_device_name;
/* The number of physical blocks on the underlying device */
block_count_t physical_blocks;
/* The number of logical blocks exposed */
block_count_t logical_blocks;
/* The logical block size, in bytes */
unsigned int logical_block_size;
/* The block map cache size (units not evident here — confirm blocks vs. bytes) */
unsigned int cache_size;
/* The maximum age of a dirty block map page before it is written out */
unsigned int block_map_maximum_age;
/* Whether deduplication is enabled */
bool deduplication;
/* Whether compression is enabled */
bool compression;
/* The configured thread and zone counts */
struct thread_count_config thread_counts;
/* The maximum size of a discard, in blocks */
block_count_t max_discard_blocks;
};
enum vdo_completion_type {
VDO_UNSET_COMPLETION_TYPE,
VDO_ACTION_COMPLETION,
VDO_ADMIN_COMPLETION,
VDO_BLOCK_ALLOCATOR_COMPLETION,
VDO_DATA_VIO_POOL_COMPLETION,
VDO_DECREMENT_COMPLETION,
VDO_FLUSH_COMPLETION,
VDO_FLUSH_NOTIFICATION_COMPLETION,
VDO_GENERATION_FLUSHED_COMPLETION,
VDO_HASH_ZONE_COMPLETION,
VDO_HASH_ZONES_COMPLETION,
VDO_LOCK_COUNTER_COMPLETION,
VDO_PAGE_COMPLETION,
VDO_READ_ONLY_MODE_COMPLETION,
VDO_REPAIR_COMPLETION,
VDO_SYNC_COMPLETION,
VIO_COMPLETION,
} __packed;
struct vdo_completion;
/* vdo_action_fn - The type of a callback run on a completion. */
typedef void (*vdo_action_fn)(struct vdo_completion *completion);
/*
 * The queueing priorities used on each type of work queue. Within each
 * queue family, higher numbers are higher priority, bounded by that
 * family's *_MAX_PRIORITY value.
 */
enum vdo_completion_priority {
BIO_ACK_Q_ACK_PRIORITY = 0,
BIO_ACK_Q_MAX_PRIORITY = 0,
BIO_Q_COMPRESSED_DATA_PRIORITY = 0,
BIO_Q_DATA_PRIORITY = 0,
BIO_Q_FLUSH_PRIORITY = 2,
BIO_Q_HIGH_PRIORITY = 2,
BIO_Q_METADATA_PRIORITY = 1,
BIO_Q_VERIFY_PRIORITY = 1,
BIO_Q_MAX_PRIORITY = 2,
CPU_Q_COMPLETE_VIO_PRIORITY = 0,
CPU_Q_COMPLETE_READ_PRIORITY = 0,
CPU_Q_COMPRESS_BLOCK_PRIORITY = 0,
CPU_Q_EVENT_REPORTER_PRIORITY = 0,
CPU_Q_HASH_BLOCK_PRIORITY = 0,
CPU_Q_MAX_PRIORITY = 0,
UDS_Q_PRIORITY = 0,
UDS_Q_MAX_PRIORITY = 0,
VDO_DEFAULT_Q_COMPLETION_PRIORITY = 1,
VDO_DEFAULT_Q_FLUSH_PRIORITY = 2,
VDO_DEFAULT_Q_MAP_BIO_PRIORITY = 0,
VDO_DEFAULT_Q_SYNC_PRIORITY = 2,
VDO_DEFAULT_Q_VIO_CALLBACK_PRIORITY = 1,
VDO_DEFAULT_Q_MAX_PRIORITY = 2,
/* One past the max: presumably a sentinel meaning "use the queue's
 * default priority" — NOTE(review): confirm against the work queue code. */
VDO_WORK_Q_MAX_PRIORITY = 2,
VDO_WORK_Q_DEFAULT_PRIORITY = VDO_WORK_Q_MAX_PRIORITY + 1,
};
/* The fundamental unit of asynchronous work in a vdo. */
struct vdo_completion {
/* The type of completion this is */
enum vdo_completion_type type;
/* Whether the operation has completed */
bool complete;
/* Whether the next callback must be requeued rather than run directly
 * (presumably even when already on the right thread — confirm) */
bool requeue;
/* The ID of the thread on which the next callback should run */
thread_id_t callback_thread_id;
/* The result of the operation */
int result;
/* The vdo on which this completion operates */
struct vdo *vdo;
/* The callback to run next */
vdo_action_fn callback;
/* If set, the callback to run instead when result indicates an error
 * (presumably — confirm dispatch logic in the completion code) */
vdo_action_fn error_handler;
/* The parent object, if any, which spawned this completion */
void *parent;
/* Link for enqueueing this completion on a funnel queue */
struct funnel_queue_entry work_queue_entry_link;
/* The priority at which to enqueue this completion */
enum vdo_completion_priority priority;
/* The work queue this completion is enqueued on, if any */
struct vdo_work_queue *my_queue;
};
/* Forward declarations of types defined elsewhere. */
struct block_allocator;
struct data_vio;
struct vdo;
struct vdo_config;
enum vio_type {
VIO_TYPE_UNINITIALIZED = 0,
VIO_TYPE_DATA,
VIO_TYPE_BLOCK_ALLOCATOR,
VIO_TYPE_BLOCK_MAP,
VIO_TYPE_BLOCK_MAP_INTERIOR,
VIO_TYPE_GEOMETRY,
VIO_TYPE_PARTITION_COPY,
VIO_TYPE_RECOVERY_JOURNAL,
VIO_TYPE_SLAB_JOURNAL,
VIO_TYPE_SLAB_SUMMARY,
VIO_TYPE_SUPER_BLOCK,
} __packed;
/* The I/O priority of a vio; higher values are more urgent. */
enum vio_priority {
VIO_PRIORITY_LOW = 0,
/* Data and compressed data share the lowest priority */
VIO_PRIORITY_DATA = VIO_PRIORITY_LOW,
VIO_PRIORITY_COMPRESSED_DATA = VIO_PRIORITY_DATA,
VIO_PRIORITY_METADATA,
VIO_PRIORITY_HIGH,
} __packed;
/*
 * A vio: vdo's wrapper around a bio. Embeds a vdo_completion as its first
 * member, so a vio can be treated as a completion.
 */
struct vio {
/* The completion for this I/O; must remain the first member so the
 * types can be converted by pointer cast */
struct vdo_completion completion;
/* The zone in which this vio's bio processing occurs */
zone_count_t bio_zone;
/* The queueing priority of this vio's I/O */
enum vio_priority priority;
/* The type of data this vio carries */
enum vio_type type;
/* The size of this vio in blocks */
unsigned int block_count;
/* The amount of data to read or write, in bytes */
unsigned int io_size;
/* The buffer being read or written */
char *data;
/* The bio used to do the actual I/O */
struct bio *bio;
/* Bios that have been merged with this vio's bio
 * (presumably for batching submissions — confirm in the bio queue code) */
struct bio_list bios_merged;
};
#endif