#ifndef VDO_ENCODINGS_H
#define VDO_ENCODINGS_H
#include <linux/blk_types.h>
#include <linux/crc32.h>
#include <linux/limits.h>
#include <linux/uuid.h>
#include "numeric.h"
#include "constants.h"
#include "types.h"
/* The in-memory representation of a version number for versioned on-disk structures. */
struct version_number {
u32 major_version;
u32 minor_version;
};
/*
 * The platform-independent, on-disk representation of a version number. Both fields are
 * stored in little-endian byte order.
 */
struct packed_version_number {
__le32 major_version;
__le32 minor_version;
} __packed;
/* The registry of component ids for use in component headers. */
#define VDO_SUPER_BLOCK 0
#define VDO_LAYOUT 1
#define VDO_RECOVERY_JOURNAL 2
#define VDO_SLAB_DEPOT 3
#define VDO_BLOCK_MAP 4
#define VDO_GEOMETRY_BLOCK 5
/* The header for a versioned component stored on disk. */
struct header {
/* The component this is a header for (one of the ids above) */
u32 id;
/* The version of the data format */
struct version_number version;
/* The size of the component data that follows this header */
size_t size;
};
/* A packed, machine-independent, on-disk representation of a component header. */
struct packed_header {
__le32 id;
struct packed_version_number version;
__le64 size;
} __packed;
enum {
/* The physical block number of the geometry block */
VDO_GEOMETRY_BLOCK_LOCATION = 0,
/* The number of bytes in the geometry magic number (excluding the trailing NUL) */
VDO_GEOMETRY_MAGIC_NUMBER_SIZE = 8,
VDO_DEFAULT_GEOMETRY_BLOCK_VERSION = 5,
};
/* Configuration for the deduplication index associated with the volume. */
struct index_config {
/* The index memory size setting */
u32 mem;
u32 unused;
/* Whether the index uses sparse caching */
bool sparse;
} __packed;
/* Identifiers for the regions of the underlying storage. */
enum volume_region_id {
VDO_INDEX_REGION = 0,
VDO_DATA_REGION = 1,
VDO_VOLUME_REGION_COUNT,
};
/* A region of the volume and the physical block at which it begins. */
struct volume_region {
/* The kind of region this is */
enum volume_region_id id;
/* The absolute starting block of the region on the device */
physical_block_number_t start_block;
} __packed;
/* The on-disk geometry of a volume (current version). */
struct volume_geometry {
u32 unused;
/* The nonce of this volume */
nonce_t nonce;
/* The uuid of this volume */
uuid_t uuid;
/* The block offset to be applied to bios */
block_count_t bio_offset;
/* The regions in ID order */
struct volume_region regions[VDO_VOLUME_REGION_COUNT];
/* The index configuration */
struct index_config index_config;
} __packed;
/* Version 4.0 of the geometry format, which lacks the bio_offset field. */
struct volume_geometry_4_0 {
u32 unused;
nonce_t nonce;
uuid_t uuid;
struct volume_region regions[VDO_VOLUME_REGION_COUNT];
struct index_config index_config;
} __packed;
/* The magic number bytes identifying a geometry block (the +1 holds a trailing NUL). */
extern const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1];
/*
 * The packed, on-disk representation of a block map entry: a 4-bit mapping state plus a
 * 36-bit physical block number split into a high nibble and a little-endian low word.
 * The bit fields are declared in both byte orders so the layout is endian-independent.
 */
struct block_map_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned mapping_state : 4;
unsigned pbn_high_nibble : 4;
#else
unsigned pbn_high_nibble : 4;
unsigned mapping_state : 4;
#endif
/* The 32 low-order bits of the pbn, in little-endian byte order */
__le32 pbn_low_word;
} __packed;
/* The on-disk header of a block map page. */
struct block_map_page_header {
/* The nonce of the volume this page belongs to */
__le64 nonce;
/* The physical block number at which this page is stored */
__le64 pbn;
/* Unused; may be non-zero on disk */
u8 unused_long_word[8];
/* Whether this page has been initialized on disk */
bool initialized;
/* Unused padding bytes; always zero when written */
u8 unused_byte1;
u8 unused_byte2;
u8 unused_byte3;
} __packed;
/* A block map page: a version, a header, and as many entries as fit in the block. */
struct block_map_page {
struct packed_version_number version;
struct block_map_page_header header;
struct block_map_entry entries[];
} __packed;
/* The possible results of validating a block map page (see vdo_validate_block_map_page()). */
enum block_map_page_validity {
VDO_BLOCK_MAP_PAGE_VALID,
VDO_BLOCK_MAP_PAGE_INVALID,
VDO_BLOCK_MAP_PAGE_BAD,
};
/* The persistent state of the block map (format version 2.0). */
struct block_map_state_2_0 {
physical_block_number_t flat_page_origin;
block_count_t flat_page_count;
physical_block_number_t root_origin;
block_count_t root_count;
} __packed;
/* The per-level page counts of a block map tree. */
struct boundary {
page_number_t levels[VDO_BLOCK_MAP_TREE_HEIGHT];
};
extern const struct header VDO_BLOCK_MAP_HEADER_2_0;
/* The persistent state of the recovery journal (format version 7.0). */
struct recovery_journal_state_7_0 {
/* Sequence number to start the journal at */
sequence_number_t journal_start;
/* The number of logical blocks used */
block_count_t logical_blocks_used;
/* The number of block map data blocks allocated */
block_count_t block_map_data_blocks;
} __packed;
extern const struct header VDO_RECOVERY_JOURNAL_HEADER_7_0;
/* A count of entries within a journal block or sector. */
typedef u16 journal_entry_count_t;
/* The unpacked, in-memory representation of a recovery journal entry. */
struct recovery_journal_entry {
struct block_map_slot slot;
struct data_location mapping;
struct data_location unmapping;
enum journal_operation operation;
};
/*
 * The packed, on-disk representation of a recovery journal entry. The 2-bit operation,
 * 10-bit slot (split 6/4), and high nibble of the pbn are packed into two bytes, declared
 * in both byte orders for endian independence, followed by the little-endian low word of
 * the pbn and the packed mapping and unmapping block map entries.
 */
struct packed_recovery_journal_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned operation : 2;
unsigned slot_low : 6;
unsigned slot_high : 4;
unsigned pbn_high_nibble : 4;
#else
unsigned slot_low : 6;
unsigned operation : 2;
unsigned pbn_high_nibble : 4;
unsigned slot_high : 4;
#endif
__le32 pbn_low_word;
struct block_map_entry mapping;
struct block_map_entry unmapping;
} __packed;
/* The version 1 packed journal entry format, which carries a single block map entry. */
struct packed_recovery_journal_entry_1 {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned operation : 2;
unsigned slot_low : 6;
unsigned slot_high : 4;
unsigned pbn_high_nibble : 4;
#else
unsigned slot_low : 6;
unsigned operation : 2;
unsigned pbn_high_nibble : 4;
unsigned slot_high : 4;
#endif
__le32 pbn_low_word;
struct block_map_entry block_map_entry;
} __packed;
/* The journal operation codes used by the version 1 journal format. */
enum journal_operation_1 {
VDO_JOURNAL_DATA_DECREMENT = 0,
VDO_JOURNAL_DATA_INCREMENT = 1,
VDO_JOURNAL_BLOCK_MAP_DECREMENT = 2,
VDO_JOURNAL_BLOCK_MAP_INCREMENT = 3,
} __packed;
/* The unpacked, in-memory representation of a recovery journal block header. */
struct recovery_block_header {
/* Block map head sequence number */
sequence_number_t block_map_head;
/* Slab journal head sequence number */
sequence_number_t slab_journal_head;
/* Sequence number of this block */
sequence_number_t sequence_number;
/* The volume nonce */
nonce_t nonce;
/* Count of logical blocks in use */
block_count_t logical_blocks_used;
/* Count of allocated block map pages */
block_count_t block_map_data_blocks;
/* Number of entries written in this block */
journal_entry_count_t entry_count;
/* The protection check byte */
u8 check_byte;
/* The number of recoveries completed */
u8 recovery_count;
/* The type of the journal format (distinguishes journal versions) */
enum vdo_metadata_type metadata_type;
};
/*
 * The packed, on-disk representation of a recovery journal block header. The field order
 * differs from the unpacked form to preserve the historical on-disk layout.
 */
struct packed_journal_header {
__le64 block_map_head;
__le64 slab_journal_head;
__le64 sequence_number;
__le64 nonce;
u8 metadata_type;
__le16 entry_count;
__le64 logical_blocks_used;
__le64 block_map_data_blocks;
u8 check_byte;
u8 recovery_count;
} __packed;
/* One sector of a recovery journal block: a small header plus its entries. */
struct packed_journal_sector {
/* The protection check byte */
u8 check_byte;
/* The number of recoveries completed */
u8 recovery_count;
/* The number of entries in this sector */
u8 entry_count;
/* The entries themselves */
struct packed_recovery_journal_entry entries[];
} __packed;
enum {
/* The number of entries that fit in one sector after the sector header */
RECOVERY_JOURNAL_ENTRIES_PER_SECTOR =
((VDO_SECTOR_SIZE - sizeof(struct packed_journal_sector)) /
sizeof(struct packed_recovery_journal_entry)),
/* Seven of the block's sectors carry entries (sector 0 holds the block header) */
RECOVERY_JOURNAL_ENTRIES_PER_BLOCK = RECOVERY_JOURNAL_ENTRIES_PER_SECTOR * 7,
/* The version 1 format stored a fixed number of entries per block */
RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK = 311,
RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR =
((VDO_SECTOR_SIZE - sizeof(struct packed_journal_sector)) /
sizeof(struct packed_recovery_journal_entry_1)),
/* The number of entries in the final (partial) sector of a version 1 block */
RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR =
(RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK % RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR),
};
/* The reference count of a physical block, as stored on disk. */
typedef u8 vdo_refcount_t;
/* An in-memory point in a journal: a block sequence number and an entry index. */
struct journal_point {
sequence_number_t sequence_number;
journal_entry_count_t entry_count;
};
/* The packed, on-disk form of a journal point (see vdo_pack_journal_point()). */
struct packed_journal_point {
/* Encoded as (sequence_number << 16) | entry_count, stored little-endian */
__le64 encoded_point;
} __packed;
/* The reference count value of a free block. */
#define EMPTY_REFERENCE_COUNT 0
enum {
/* The largest permitted actual reference count */
MAXIMUM_REFERENCE_COUNT = 254,
/* The sentinel value marking a provisional reference */
PROVISIONAL_REFERENCE_COUNT = 255,
};
enum {
/* The number of reference counts that fit in a sector after its commit point */
COUNTS_PER_SECTOR =
((VDO_SECTOR_SIZE - sizeof(struct packed_journal_point)) / sizeof(vdo_refcount_t)),
COUNTS_PER_BLOCK = COUNTS_PER_SECTOR * VDO_SECTORS_PER_BLOCK,
};
/* One sector of a reference block: a commit point and the counts it covers. */
struct packed_reference_sector {
struct packed_journal_point commit_point;
vdo_refcount_t counts[COUNTS_PER_SECTOR];
} __packed;
/* A full block of packed reference sectors. */
struct packed_reference_block {
struct packed_reference_sector sectors[VDO_SECTORS_PER_BLOCK];
};
/* The persistent state of the slab depot (format version 2.0). */
struct slab_depot_state_2_0 {
struct slab_config slab_config;
physical_block_number_t first_block;
physical_block_number_t last_block;
zone_count_t zone_count;
} __packed;
extern const struct header VDO_SLAB_DEPOT_HEADER_2_0;
/* The unpacked, in-memory representation of a slab journal entry. */
struct slab_journal_entry {
slab_block_number sbn;
enum journal_operation operation;
bool increment;
};
/*
 * The packed, on-disk representation of a slab journal entry: a 23-bit slab block offset
 * plus an increment/decrement flag, stored in three bytes. The bit fields are declared in
 * both byte orders for endian independence.
 */
typedef struct {
u8 offset_low8;
u8 offset_mid8;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned offset_high7 : 7;
unsigned increment : 1;
#else
unsigned increment : 1;
unsigned offset_high7 : 7;
#endif
} __packed packed_slab_journal_entry;
/* The unpacked, in-memory representation of a slab journal block header. */
struct slab_journal_block_header {
/* Sequence number of the oldest block the journal still cares about */
sequence_number_t head;
/* Sequence number of this block */
sequence_number_t sequence_number;
/* The volume nonce */
nonce_t nonce;
/* The recovery journal point corresponding to the last entry of this block */
struct journal_point recovery_point;
/* The type of the journal format */
enum vdo_metadata_type metadata_type;
/* Whether this block contains block map increment entries */
bool has_block_map_increments;
/* The number of entries in the block */
journal_entry_count_t entry_count;
};
/*
 * The packed, on-disk representation of a slab journal block header. The field order
 * differs from the unpacked form to preserve the historical on-disk layout.
 */
struct packed_slab_journal_block_header {
__le64 head;
__le64 sequence_number;
struct packed_journal_point recovery_point;
__le64 nonce;
u8 metadata_type;
bool has_block_map_increments;
__le16 entry_count;
} __packed;
enum {
/* The number of payload bytes available for entries in each slab journal block */
VDO_SLAB_JOURNAL_PAYLOAD_SIZE =
VDO_BLOCK_SIZE - sizeof(struct packed_slab_journal_block_header),
/* "Full" entries use 25 bits each: a 24-bit packed entry plus one type flag bit */
VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK = (VDO_SLAB_JOURNAL_PAYLOAD_SIZE * 8) / 25,
/* Bytes of type flags: one bit per full entry, rounded up to whole bytes */
VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE =
((VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK - 1) / 8) + 1,
/* The number of packed entries that fit when no type flags are stored */
VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK =
(VDO_SLAB_JOURNAL_PAYLOAD_SIZE / sizeof(packed_slab_journal_entry)),
};
/* The payload layout used when a block also carries per-entry type flags. */
struct full_slab_journal_entries {
packed_slab_journal_entry entries[VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK];
u8 entry_types[VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE];
} __packed;
/* The payload of a slab journal block, viewable three different ways. */
typedef union {
struct full_slab_journal_entries full_entries;
packed_slab_journal_entry entries[VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK];
u8 space[VDO_SLAB_JOURNAL_PAYLOAD_SIZE];
} __packed slab_journal_payload;
/* A complete on-disk slab journal block. */
struct packed_slab_journal_block {
struct packed_slab_journal_block_header header;
slab_journal_payload payload;
} __packed;
/* The offset of a slab journal tail block within a slab, as stored in the slab summary. */
typedef u8 tail_block_offset_t;
/* One slab's entry in the slab summary, packed into two bytes. */
struct slab_summary_entry {
/* The offset of the slab journal tail block */
tail_block_offset_t tail_block_offset;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* A 6-bit hint about the fullness of the slab */
unsigned int fullness_hint : 6;
/* Whether the reference counts must be loaded from storage */
unsigned int load_ref_counts : 1;
/* Whether the slab is believed dirty */
unsigned int is_dirty : 1;
#else
unsigned int is_dirty : 1;
unsigned int load_ref_counts : 1;
unsigned int fullness_hint : 6;
#endif
} __packed;
enum {
VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS = 6,
/* The number of summary entries that fit in one block */
VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK = VDO_BLOCK_SIZE / sizeof(struct slab_summary_entry),
/* The blocks needed to summarize the maximum number of slabs for one zone */
VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE = MAX_VDO_SLABS / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK,
VDO_SLAB_SUMMARY_BLOCKS = VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * MAX_VDO_PHYSICAL_ZONES,
};
/* The in-memory layout of partitions within a span of physical blocks. */
struct layout {
/* The physical block at which the layout begins */
physical_block_number_t start;
/* The total size of the layout in blocks */
block_count_t size;
/* The first block free for allocation to partitions */
physical_block_number_t first_free;
/* The last block free for allocation */
physical_block_number_t last_free;
/* The number of partitions in the list */
size_t num_partitions;
/* The head of the singly linked list of partitions */
struct partition *head;
};
/* A partition within a layout: an id, a starting offset, and a length in blocks. */
struct partition {
/* The id of this partition */
enum partition_id id;
/* The offset into the layout at which the partition begins */
physical_block_number_t offset;
/* The number of blocks in the partition */
block_count_t count;
/* The next partition in the layout's list */
struct partition *next;
};
/* The packed, on-disk form of a layout (format version 3.0). */
struct layout_3_0 {
physical_block_number_t first_free;
physical_block_number_t last_free;
u8 partition_count;
} __packed;
/* The packed, on-disk form of a partition (format version 3.0). */
struct partition_3_0 {
enum partition_id id;
physical_block_number_t offset;
/* The historical base field; not present in the in-memory struct partition */
physical_block_number_t base;
block_count_t count;
} __packed;
/* The configuration of the vdo as stored in the super block. */
struct vdo_config {
/* The number of logical blocks */
block_count_t logical_blocks;
/* The number of physical blocks */
block_count_t physical_blocks;
/* The number of blocks in a slab */
block_count_t slab_size;
/* The number of recovery journal blocks */
block_count_t recovery_journal_size;
/* The number of slab journal blocks per slab */
block_count_t slab_journal_blocks;
};
/* The in-memory component data of the vdo itself. */
struct vdo_component {
enum vdo_state state;
/* The number of recoveries completed */
u64 complete_recoveries;
/* The number of read-only recoveries */
u64 read_only_recoveries;
struct vdo_config config;
nonce_t nonce;
};
/* The packed, on-disk form of a vdo_config. */
struct packed_vdo_config {
__le64 logical_blocks;
__le64 physical_blocks;
__le64 slab_size;
__le64 recovery_journal_size;
__le64 slab_journal_blocks;
} __packed;
/* The packed, on-disk form of version 41.0 of the vdo component data. */
struct packed_vdo_component_41_0 {
__le32 state;
__le64 complete_recoveries;
__le64 read_only_recoveries;
struct packed_vdo_config config;
__le64 nonce;
} __packed;
extern const struct version_number VDO_VOLUME_VERSION_67_0;
/* Sizes and offsets of the encoded super block components. */
enum {
/* The encoded size of a component header */
VDO_ENCODED_HEADER_SIZE = sizeof(struct packed_header),
BLOCK_MAP_COMPONENT_ENCODED_SIZE =
VDO_ENCODED_HEADER_SIZE + sizeof(struct block_map_state_2_0),
RECOVERY_JOURNAL_COMPONENT_ENCODED_SIZE =
VDO_ENCODED_HEADER_SIZE + sizeof(struct recovery_journal_state_7_0),
SLAB_DEPOT_COMPONENT_ENCODED_SIZE =
VDO_ENCODED_HEADER_SIZE + sizeof(struct slab_depot_state_2_0),
/* The number of partitions recorded in an encoded layout */
VDO_PARTITION_COUNT = 4,
VDO_LAYOUT_ENCODED_SIZE = (VDO_ENCODED_HEADER_SIZE +
sizeof(struct layout_3_0) +
(sizeof(struct partition_3_0) * VDO_PARTITION_COUNT)),
/* The fixed portion of the super block: its header plus a u32 (presumably the checksum — confirm in the super block codec) */
VDO_SUPER_BLOCK_FIXED_SIZE = VDO_ENCODED_HEADER_SIZE + sizeof(u32),
/* The component data must fit in the first sector of the super block */
VDO_MAX_COMPONENT_DATA_SIZE = VDO_SECTOR_SIZE - VDO_SUPER_BLOCK_FIXED_SIZE,
VDO_COMPONENT_ENCODED_SIZE =
(sizeof(struct packed_version_number) + sizeof(struct packed_vdo_component_41_0)),
VDO_COMPONENT_DATA_OFFSET = VDO_ENCODED_HEADER_SIZE,
VDO_COMPONENT_DATA_SIZE = (sizeof(u32) +
sizeof(struct packed_version_number) +
VDO_COMPONENT_ENCODED_SIZE +
VDO_LAYOUT_ENCODED_SIZE +
RECOVERY_JOURNAL_COMPONENT_ENCODED_SIZE +
SLAB_DEPOT_COMPONENT_ENCODED_SIZE +
BLOCK_MAP_COMPONENT_ENCODED_SIZE),
};
/* The entire set of component data encoded in the vdo super block. */
struct vdo_component_states {
/* Unused field retained for compatibility */
u32 unused;
/* The version of the volume layout */
struct version_number volume_version;
/* The vdo component itself */
struct vdo_component vdo;
/* The block map state */
struct block_map_state_2_0 block_map;
/* The recovery journal state */
struct recovery_journal_state_7_0 recovery_journal;
/* The slab depot state */
struct slab_depot_state_2_0 slab_depot;
/* The layout of the underlying storage */
struct layout layout;
};
static inline bool vdo_are_same_version(struct version_number version_a,
struct version_number version_b)
{
return ((version_a.major_version == version_b.major_version) &&
(version_a.minor_version == version_b.minor_version));
}
/**
 * vdo_pack_version_number() - Convert a version number to its on-disk (little-endian) form.
 * @version: The version to pack.
 *
 * Return: The packed version number.
 */
static inline struct packed_version_number vdo_pack_version_number(struct version_number version)
{
return (struct packed_version_number) {
.major_version = __cpu_to_le32(version.major_version),
.minor_version = __cpu_to_le32(version.minor_version),
};
}
/**
 * vdo_unpack_version_number() - Convert a packed version number to its in-memory form.
 * @version: The version to unpack.
 *
 * Return: The unpacked version number, in native byte order.
 */
static inline struct version_number vdo_unpack_version_number(struct packed_version_number version)
{
return (struct version_number) {
.major_version = __le32_to_cpu(version.major_version),
.minor_version = __le32_to_cpu(version.minor_version),
};
}
/**
 * vdo_pack_header() - Convert a component header to its on-disk form.
 * @header: The header to pack.
 *
 * Return: The packed header.
 */
static inline struct packed_header vdo_pack_header(const struct header *header)
{
return (struct packed_header) {
.id = __cpu_to_le32(header->id),
.version = vdo_pack_version_number(header->version),
.size = __cpu_to_le64(header->size),
};
}
/**
 * vdo_unpack_header() - Convert a packed header to its in-memory form.
 * @header: The header to unpack.
 *
 * Return: The unpacked header, in native byte order.
 */
static inline struct header vdo_unpack_header(const struct packed_header *header)
{
return (struct header) {
.id = __le32_to_cpu(header->id),
.version = vdo_unpack_version_number(header->version),
.size = __le64_to_cpu(header->size),
};
}
/**
 * vdo_get_index_region_start() - Get the start of the index region from a geometry.
 * @geometry: The geometry.
 *
 * Return: The start block of the index region.
 */
static inline physical_block_number_t __must_check
vdo_get_index_region_start(struct volume_geometry geometry)
{
return geometry.regions[VDO_INDEX_REGION].start_block;
}
/**
 * vdo_get_data_region_start() - Get the start of the data region from a geometry.
 * @geometry: The geometry.
 *
 * Return: The start block of the data region.
 */
static inline physical_block_number_t __must_check
vdo_get_data_region_start(struct volume_geometry geometry)
{
return geometry.regions[VDO_DATA_REGION].start_block;
}
/**
 * vdo_get_index_region_size() - Get the size of the index region from a geometry.
 * @geometry: The geometry.
 *
 * Return: The number of blocks between the index region start and the data region start.
 */
static inline physical_block_number_t __must_check
vdo_get_index_region_size(struct volume_geometry geometry)
{
return vdo_get_data_region_start(geometry) -
vdo_get_index_region_start(geometry);
}
/* Decode the contents of a geometry block buffer into a volume_geometry. */
int __must_check vdo_parse_geometry_block(unsigned char *block,
struct volume_geometry *geometry);
/**
 * vdo_is_state_compressed() - Check whether a mapping state indicates a compressed mapping.
 * @mapping_state: The mapping state to check.
 *
 * Return: true for any state above VDO_MAPPING_STATE_UNCOMPRESSED.
 */
static inline bool vdo_is_state_compressed(const enum block_mapping_state mapping_state)
{
	/* Every state beyond the uncompressed one denotes a compression slot. */
	return !(mapping_state <= VDO_MAPPING_STATE_UNCOMPRESSED);
}
/**
 * vdo_pack_block_map_entry() - Pack a pbn and mapping state into a block map entry.
 * @pbn: The physical block number to encode (only the low 36 bits are stored).
 * @mapping_state: The mapping state (only the low 4 bits are stored).
 *
 * Return: The packed, endian-independent entry.
 */
static inline struct block_map_entry
vdo_pack_block_map_entry(physical_block_number_t pbn, enum block_mapping_state mapping_state)
{
return (struct block_map_entry) {
.mapping_state = (mapping_state & 0x0F),
.pbn_high_nibble = ((pbn >> 32) & 0x0F),
.pbn_low_word = __cpu_to_le32(pbn & UINT_MAX),
};
}
/**
 * vdo_unpack_block_map_entry() - Unpack a packed block map entry into a data_location.
 * @entry: The entry to unpack.
 *
 * Return: The 36-bit pbn and the mapping state recorded in the entry.
 */
static inline struct data_location vdo_unpack_block_map_entry(const struct block_map_entry *entry)
{
physical_block_number_t low32 = __le32_to_cpu(entry->pbn_low_word);
physical_block_number_t high4 = entry->pbn_high_nibble;
return (struct data_location) {
.pbn = ((high4 << 32) | low32),
.state = entry->mapping_state,
};
}
/**
 * vdo_is_mapped_location() - Check whether a data_location is mapped.
 * @location: The location to check.
 *
 * Return: true unless the state is VDO_MAPPING_STATE_UNMAPPED.
 */
static inline bool vdo_is_mapped_location(const struct data_location *location)
{
	return !(location->state == VDO_MAPPING_STATE_UNMAPPED);
}
/**
 * vdo_is_valid_location() - Check whether a data_location is consistent.
 * @location: The location to check.
 *
 * The zero block may not be mapped into a compressed block; any other pbn must
 * carry a mapped state.
 *
 * Return: true if the pbn and state are a permitted combination.
 */
static inline bool vdo_is_valid_location(const struct data_location *location)
{
	return ((location->pbn == VDO_ZERO_BLOCK) ?
		!vdo_is_state_compressed(location->state) :
		vdo_is_mapped_location(location));
}
/**
 * vdo_get_block_map_page_pbn() - Get the pbn recorded in a block map page's header.
 * @page: The page.
 *
 * Return: The pbn, converted to native byte order.
 */
static inline physical_block_number_t __must_check
vdo_get_block_map_page_pbn(const struct block_map_page *page)
{
return __le64_to_cpu(page->header.pbn);
}
/* Format a buffer as a block map page with the given nonce, pbn, and initialized flag. */
struct block_map_page *vdo_format_block_map_page(void *buffer, nonce_t nonce,
physical_block_number_t pbn,
bool initialized);
/* Check a block map page against the expected nonce and pbn. */
enum block_map_page_validity __must_check vdo_validate_block_map_page(struct block_map_page *page,
nonce_t nonce,
physical_block_number_t pbn);
/**
 * vdo_compute_block_map_page_count() - Compute the pages needed to hold some entries.
 * @entries: The number of block map entries.
 *
 * Return: The number of pages, rounded up.
 */
static inline page_count_t vdo_compute_block_map_page_count(block_count_t entries)
{
return DIV_ROUND_UP(entries, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
}
/* Compute the pages needed to grow a block map forest to hold a new number of entries. */
block_count_t __must_check vdo_compute_new_forest_pages(root_count_t root_count,
struct boundary *old_sizes,
block_count_t entries,
struct boundary *new_sizes);
/**
 * vdo_pack_recovery_journal_entry() - Pack a recovery journal entry into its on-disk form.
 * @entry: The entry to pack.
 *
 * The 10-bit slot is split into 6 low and 4 high bits, and only the low 36 bits of the
 * slot's pbn are stored.
 *
 * Return: The packed entry.
 */
static inline struct packed_recovery_journal_entry
vdo_pack_recovery_journal_entry(const struct recovery_journal_entry *entry)
{
return (struct packed_recovery_journal_entry) {
.operation = entry->operation,
.slot_low = entry->slot.slot & 0x3F,
.slot_high = (entry->slot.slot >> 6) & 0x0F,
.pbn_high_nibble = (entry->slot.pbn >> 32) & 0x0F,
.pbn_low_word = __cpu_to_le32(entry->slot.pbn & UINT_MAX),
.mapping = vdo_pack_block_map_entry(entry->mapping.pbn,
entry->mapping.state),
.unmapping = vdo_pack_block_map_entry(entry->unmapping.pbn,
entry->unmapping.state),
};
}
/**
 * vdo_unpack_recovery_journal_entry() - Unpack an on-disk recovery journal entry.
 * @entry: The entry to unpack.
 *
 * Return: The unpacked, in-memory entry.
 */
static inline struct recovery_journal_entry
vdo_unpack_recovery_journal_entry(const struct packed_recovery_journal_entry *entry)
{
physical_block_number_t low32 = __le32_to_cpu(entry->pbn_low_word);
physical_block_number_t high4 = entry->pbn_high_nibble;
return (struct recovery_journal_entry) {
.operation = entry->operation,
.slot = {
.pbn = ((high4 << 32) | low32),
.slot = (entry->slot_low | (entry->slot_high << 6)),
},
.mapping = vdo_unpack_block_map_entry(&entry->mapping),
.unmapping = vdo_unpack_block_map_entry(&entry->unmapping),
};
}
/* Get a human-readable name for a journal operation. */
const char * __must_check vdo_get_journal_operation_name(enum journal_operation operation);
/**
 * vdo_is_valid_recovery_journal_sector() - Check whether a sector is plausibly part of the
 *                                          journal block described by the given header.
 * @header: The unpacked block header to compare against.
 * @sector: The packed sector to check.
 * @sector_number: The number of the sector being checked.
 *
 * Return: true if the sector's check data match the header and its entry count fits the
 *         journal format's limit for that sector.
 */
static inline bool __must_check
vdo_is_valid_recovery_journal_sector(const struct recovery_block_header *header,
const struct packed_journal_sector *sector,
u8 sector_number)
{
/* The check byte and recovery count must match the block header. */
if ((header->check_byte != sector->check_byte) ||
(header->recovery_count != sector->recovery_count))
return false;
/* The current journal format allows a full sector of entries in every sector. */
if (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL_2)
return sector->entry_count <= RECOVERY_JOURNAL_ENTRIES_PER_SECTOR;
/* The version 1 format has a shorter final sector. */
if (sector_number == 7)
return sector->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR;
return sector->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR;
}
/**
 * vdo_compute_recovery_journal_block_number() - Compute the journal-relative block number
 *                                               holding a given sequence number.
 * @journal_size: The size of the journal in blocks; assumed to be a power of two so the
 *                mask below is equivalent to a modulus — confirm with callers.
 * @sequence_number: The sequence number.
 *
 * Return: The block number within the journal.
 */
static inline physical_block_number_t __must_check
vdo_compute_recovery_journal_block_number(block_count_t journal_size,
sequence_number_t sequence_number)
{
return (sequence_number & (journal_size - 1));
}
/**
 * vdo_get_journal_block_sector() - Find a sector of a recovery journal block by number.
 * @header: The header at the start of the journal block.
 * @sector_number: The index of the sector within the block.
 *
 * Return: A pointer to the requested sector within the block.
 */
static inline struct packed_journal_sector * __must_check
vdo_get_journal_block_sector(struct packed_journal_header *header, int sector_number)
{
char *sector_data = ((char *) header) + (VDO_SECTOR_SIZE * sector_number);
return (struct packed_journal_sector *) sector_data;
}
/**
 * vdo_pack_recovery_block_header() - Pack a recovery journal block header into its on-disk
 *                                    (little-endian) form.
 * @header: The header to pack.
 * @packed: The packed structure to fill in.
 */
static inline void vdo_pack_recovery_block_header(const struct recovery_block_header *header,
struct packed_journal_header *packed)
{
*packed = (struct packed_journal_header) {
.block_map_head = __cpu_to_le64(header->block_map_head),
.slab_journal_head = __cpu_to_le64(header->slab_journal_head),
.sequence_number = __cpu_to_le64(header->sequence_number),
.nonce = __cpu_to_le64(header->nonce),
.logical_blocks_used = __cpu_to_le64(header->logical_blocks_used),
.block_map_data_blocks = __cpu_to_le64(header->block_map_data_blocks),
.entry_count = __cpu_to_le16(header->entry_count),
.check_byte = header->check_byte,
.recovery_count = header->recovery_count,
.metadata_type = header->metadata_type,
};
}
/**
 * vdo_unpack_recovery_block_header() - Unpack an on-disk recovery journal block header.
 * @packed: The packed header.
 *
 * Return: The unpacked header, in native byte order.
 */
static inline struct recovery_block_header
vdo_unpack_recovery_block_header(const struct packed_journal_header *packed)
{
return (struct recovery_block_header) {
.block_map_head = __le64_to_cpu(packed->block_map_head),
.slab_journal_head = __le64_to_cpu(packed->slab_journal_head),
.sequence_number = __le64_to_cpu(packed->sequence_number),
.nonce = __le64_to_cpu(packed->nonce),
.logical_blocks_used = __le64_to_cpu(packed->logical_blocks_used),
.block_map_data_blocks = __le64_to_cpu(packed->block_map_data_blocks),
.entry_count = __le16_to_cpu(packed->entry_count),
.check_byte = packed->check_byte,
.recovery_count = packed->recovery_count,
.metadata_type = packed->metadata_type,
};
}
/**
 * vdo_compute_slab_count() - Compute the number of whole slabs that fit in a block range.
 * @first_block: The first block that may be allocated to slabs.
 * @last_block: The block after the last that may be allocated.
 * @slab_size_shift: The log2 of the slab size in blocks.
 *
 * Return: The number of slabs.
 */
static inline slab_count_t vdo_compute_slab_count(physical_block_number_t first_block,
physical_block_number_t last_block,
unsigned int slab_size_shift)
{
return (slab_count_t) ((last_block - first_block) >> slab_size_shift);
}
/* Configure the persistent state of a slab depot for the given partition. */
int __must_check vdo_configure_slab_depot(const struct partition *partition,
struct slab_config slab_config,
zone_count_t zone_count,
struct slab_depot_state_2_0 *state);
/* Derive a slab_config from a slab size and a slab journal length. */
int __must_check vdo_configure_slab(block_count_t slab_size,
block_count_t slab_journal_blocks,
struct slab_config *slab_config);
/**
 * vdo_get_saved_reference_count_size() - Get the number of blocks required to save
 *                                        reference counts for some number of blocks.
 * @block_count: The number of physical blocks covered.
 *
 * Return: The number of reference count blocks, rounded up.
 */
static inline block_count_t vdo_get_saved_reference_count_size(block_count_t block_count)
{
return DIV_ROUND_UP(block_count, COUNTS_PER_BLOCK);
}
/**
 * vdo_get_slab_journal_start_block() - Get the first block of a slab's journal.
 * @slab_config: The slab configuration.
 * @origin: The first block of the slab.
 *
 * Return: The block following the slab's data and reference count blocks.
 */
static inline physical_block_number_t __must_check
vdo_get_slab_journal_start_block(const struct slab_config *slab_config,
physical_block_number_t origin)
{
return origin + slab_config->data_blocks + slab_config->reference_count_blocks;
}
/**
 * vdo_advance_journal_point() - Move a journal point forward by one entry, rolling over to
 *                               the next sequence number when a block's entries are full.
 * @point: The journal point to advance (modified in place).
 * @entries_per_block: The number of entries per journal block.
 */
static inline void vdo_advance_journal_point(struct journal_point *point,
journal_entry_count_t entries_per_block)
{
point->entry_count++;
if (point->entry_count == entries_per_block) {
point->sequence_number++;
point->entry_count = 0;
}
}
/**
 * vdo_before_journal_point() - Check whether one journal point precedes another.
 * @first: The first journal point.
 * @second: The second journal point.
 *
 * Points are ordered by sequence number first, then by entry count.
 *
 * Return: true if the first point strictly precedes the second.
 */
static inline bool vdo_before_journal_point(const struct journal_point *first,
					    const struct journal_point *second)
{
	if (first->sequence_number != second->sequence_number)
		return (first->sequence_number < second->sequence_number);
	return (first->entry_count < second->entry_count);
}
/**
 * vdo_pack_journal_point() - Encode a journal point into its on-disk form.
 * @unpacked: The in-memory journal point.
 * @packed: The packed structure to fill in.
 *
 * The sequence number occupies the high 48 bits and the entry count the low 16.
 */
static inline void vdo_pack_journal_point(const struct journal_point *unpacked,
struct packed_journal_point *packed)
{
packed->encoded_point =
__cpu_to_le64((unpacked->sequence_number << 16) | unpacked->entry_count);
}
/**
 * vdo_unpack_journal_point() - Decode an on-disk journal point.
 * @packed: The packed journal point.
 * @unpacked: The structure to receive the decoded point.
 */
static inline void vdo_unpack_journal_point(const struct packed_journal_point *packed,
struct journal_point *unpacked)
{
u64 native = __le64_to_cpu(packed->encoded_point);
unpacked->sequence_number = (native >> 16);
unpacked->entry_count = (native & 0xffff);
}
/**
 * vdo_pack_slab_journal_block_header() - Pack a slab journal block header into its on-disk
 *                                        (little-endian) form.
 * @header: The header to pack.
 * @packed: The packed structure to fill in.
 */
static inline void
vdo_pack_slab_journal_block_header(const struct slab_journal_block_header *header,
struct packed_slab_journal_block_header *packed)
{
packed->head = __cpu_to_le64(header->head);
packed->sequence_number = __cpu_to_le64(header->sequence_number);
packed->nonce = __cpu_to_le64(header->nonce);
packed->entry_count = __cpu_to_le16(header->entry_count);
packed->metadata_type = header->metadata_type;
packed->has_block_map_increments = header->has_block_map_increments;
vdo_pack_journal_point(&header->recovery_point, &packed->recovery_point);
}
/**
 * vdo_unpack_slab_journal_block_header() - Unpack an on-disk slab journal block header.
 * @packed: The packed header.
 * @header: The structure to receive the unpacked header.
 */
static inline void
vdo_unpack_slab_journal_block_header(const struct packed_slab_journal_block_header *packed,
struct slab_journal_block_header *header)
{
*header = (struct slab_journal_block_header) {
.head = __le64_to_cpu(packed->head),
.sequence_number = __le64_to_cpu(packed->sequence_number),
.nonce = __le64_to_cpu(packed->nonce),
.entry_count = __le16_to_cpu(packed->entry_count),
.metadata_type = packed->metadata_type,
.has_block_map_increments = packed->has_block_map_increments,
};
vdo_unpack_journal_point(&packed->recovery_point, &header->recovery_point);
}
/**
 * vdo_pack_slab_journal_entry() - Pack a slab block offset and direction flag into a slab
 *                                 journal entry.
 * @packed: The entry to fill in.
 * @sbn: The slab block number (only the low 23 bits are stored).
 * @is_increment: Whether this entry is an increment.
 */
static inline void vdo_pack_slab_journal_entry(packed_slab_journal_entry *packed,
slab_block_number sbn, bool is_increment)
{
packed->offset_low8 = (sbn & 0x0000FF);
packed->offset_mid8 = (sbn & 0x00FF00) >> 8;
packed->offset_high7 = (sbn & 0x7F0000) >> 16;
packed->increment = is_increment ? 1 : 0;
}
/**
 * vdo_unpack_slab_journal_entry() - Unpack a slab journal entry into its in-memory form.
 * @packed: The entry to unpack.
 *
 * Return: The unpacked entry. The operation is always data remapping; the direction is
 *         carried by the increment flag.
 */
static inline struct slab_journal_entry __must_check
vdo_unpack_slab_journal_entry(const packed_slab_journal_entry *packed)
{
struct slab_journal_entry entry;
/* Reassemble the 23-bit offset from its three fields, high byte first. */
entry.sbn = packed->offset_high7;
entry.sbn <<= 8;
entry.sbn |= packed->offset_mid8;
entry.sbn <<= 8;
entry.sbn |= packed->offset_low8;
entry.operation = VDO_JOURNAL_DATA_REMAPPING;
entry.increment = packed->increment;
return entry;
}
/* Decode the entry at the given index from a packed slab journal block. */
struct slab_journal_entry __must_check
vdo_decode_slab_journal_entry(struct packed_slab_journal_block *block,
journal_entry_count_t entry_count);
/**
 * vdo_get_slab_summary_hint_shift() - Compute the shift for converting a slab block number
 *                                     to a fullness hint.
 * @slab_size_shift: The log2 of the slab size.
 *
 * Return: The number of bits to shift, or 0 when the slab size already fits in the hint.
 */
static inline u8 __must_check vdo_get_slab_summary_hint_shift(unsigned int slab_size_shift)
{
	if (slab_size_shift <= VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS)
		return 0;
	return (slab_size_shift - VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS);
}
/* Lay out the partitions of a vdo within the given span of blocks. */
int __must_check vdo_initialize_layout(block_count_t size,
physical_block_number_t offset,
block_count_t block_map_blocks,
block_count_t journal_blocks,
block_count_t summary_blocks,
struct layout *layout);
/* Clean up a layout and the partitions it owns. */
void vdo_uninitialize_layout(struct layout *layout);
/* Look up a partition by id; the result indicates whether it was found. */
int __must_check vdo_get_partition(struct layout *layout, enum partition_id id,
struct partition **partition_ptr);
/* Look up a partition which the caller knows must exist. */
struct partition * __must_check vdo_get_known_partition(struct layout *layout,
enum partition_id id);
/* Validate a vdo_config against the supplied physical and logical sizes. */
int vdo_validate_config(const struct vdo_config *config,
block_count_t physical_block_count,
block_count_t logical_block_count);
/* Release the resources held by a set of component states. */
void vdo_destroy_component_states(struct vdo_component_states *states);
/* Decode the component states from a super block buffer. */
int __must_check vdo_decode_component_states(u8 *buffer,
struct volume_geometry *geometry,
struct vdo_component_states *states);
/* Check decoded component states against the geometry nonce and device sizes. */
int __must_check vdo_validate_component_states(struct vdo_component_states *states,
nonce_t geometry_nonce,
block_count_t physical_size,
block_count_t logical_size);
/* Encode the component states into a super block buffer. */
void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states);
/* Decode and check a super block buffer. */
int __must_check vdo_decode_super_block(u8 *buffer);
/**
 * vdo_crc32() - Compute the CRC-32 checksum of a buffer.
 * @buf: The data to checksum.
 * @len: The length of the data in bytes.
 *
 * The kernel's crc32() does not pre- or post-condition its seed, whereas the standard
 * (zlib-style) CRC-32 used when vdo metadata is written starts from an all-ones register
 * and inverts the final value. Seed with ~0 and invert the result so this wrapper produces
 * the standard checksum; seeding with 0 (the previous behavior) yields a value that does
 * not match CRC-32 as computed by userspace tools.
 *
 * Return: The standard CRC-32 checksum of the buffer.
 */
static inline u32 vdo_crc32(const void *buf, unsigned long len)
{
	return (crc32(~0L, buf, len) ^ ~0L);
}
#endif