/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef DATA_VIO_H
#define DATA_VIO_H

#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/list.h>

#include "permassert.h"

#include "indexer.h"

#include "block-map.h"
#include "completion.h"
#include "constants.h"
#include "dedupe.h"
#include "encodings.h"
#include "logical-zone.h"
#include "physical-zone.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"
#include "wait-queue.h"

/* Codes for describing the last asynchronous operation performed on a vio. */
enum async_operation_number {
MIN_VIO_ASYNC_OPERATION_NUMBER,
VIO_ASYNC_OP_LAUNCH = MIN_VIO_ASYNC_OPERATION_NUMBER,
VIO_ASYNC_OP_ACKNOWLEDGE_WRITE,
VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK,
VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK,
VIO_ASYNC_OP_LOCK_DUPLICATE_PBN,
VIO_ASYNC_OP_CHECK_FOR_DUPLICATION,
VIO_ASYNC_OP_CLEANUP,
VIO_ASYNC_OP_COMPRESS_DATA_VIO,
VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT,
VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ,
VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE,
VIO_ASYNC_OP_HASH_DATA_VIO,
VIO_ASYNC_OP_JOURNAL_REMAPPING,
VIO_ASYNC_OP_ATTEMPT_PACKING,
VIO_ASYNC_OP_PUT_MAPPED_BLOCK,
VIO_ASYNC_OP_READ_DATA_VIO,
VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX,
VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS,
VIO_ASYNC_OP_VERIFY_DUPLICATION,
VIO_ASYNC_OP_WRITE_DATA_VIO,
MAX_VIO_ASYNC_OPERATION_NUMBER,
} __packed;
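
/*
 * Each stage records its operation code in the data_vio for logging and debugging;
 * get_data_vio_operation_name() (declared below) renders the code for display. An illustrative
 * sketch of recording a code before launching the next stage (acknowledge_write() is a
 * hypothetical action, not part of this header):
 *
 *	data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
 *	launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write);
 */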

/* The state of a data_vio's lock on its logical block number (LBN). */
struct lbn_lock {
logical_block_number_t lbn;
bool locked;
struct vdo_wait_queue waiters;
struct logical_zone *zone;
};

/* A position in the arboreal block map at a specific level. */
struct block_map_tree_slot {
page_number_t page_index;
struct block_map_slot block_map_slot;
};

/* Fields for using the arboreal block map. */
struct tree_lock {
	/* The current height at which this data_vio is operating */
	height_t height;
	/* The block map tree for this LBN */
	root_count_t root_index;
	/* Whether we hold a page lock */
	bool locked;
	/* The key for the lock map */
	u64 key;
	/* The queue of waiters for the page this vio is allocating or loading */
	struct vdo_wait_queue waiters;
	/* The block map tree slots for this LBN */
	struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
};

/* A physical block number, its mapping state, and the physical zone which manages it. */
struct zoned_pbn {
physical_block_number_t pbn;
enum block_mapping_state state;
struct physical_zone *zone;
};

/*
 * Where a data_vio is on the compression path; advance_data_vio_compression_stage() depends on
 * the order in which these are defined.
 */
enum data_vio_compression_stage {
	/* A data_vio which has not yet entered the compression path */
	DATA_VIO_PRE_COMPRESSOR,
	/* A data_vio which is in the compressor */
	DATA_VIO_COMPRESSING,
	/* A data_vio which is blocked in the packer */
	DATA_VIO_PACKING,
	/* A data_vio which is no longer on the compression path (and never will be) */
	DATA_VIO_POST_PACKER,
};

struct data_vio_compression_status {
enum data_vio_compression_stage stage;
bool may_not_compress;
};
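
/*
 * A minimal sketch of consulting a data_vio's compression status (illustrative only;
 * get_data_vio_compression_status() is declared below):
 *
 *	struct data_vio_compression_status status =
 *		get_data_vio_compression_status(data_vio);
 *
 *	if ((status.stage == DATA_VIO_PACKING) && !status.may_not_compress) {
 *		// still eligible for compression and currently held in the packer
 *	}
 */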

struct compression_state {
	/*
	 * The current compression status of this data_vio. This field contains a value which
	 * consists of a data_vio_compression_stage and a flag indicating whether a request has
	 * been made to cancel (or prevent) compression for this data_vio. This field should be
	 * accessed through the get_data_vio_compression_status() and
	 * set_data_vio_compression_status() methods; it should not be accessed directly.
	 */
	atomic_t status;
	/* The compressed size of this block */
	u16 size;
	/* The packer bin slot which holds the enclosing data_vio */
	slot_number_t slot;
	/* The packer bin to which the enclosing data_vio has been assigned */
	struct packer_bin *bin;
	/* A link in the chain of data_vios which have been packed together */
	struct data_vio *next_in_batch;
	/* A vio which is blocked in the packer while holding a lock this vio needs */
	struct data_vio *lock_holder;
	/*
	 * The compressed block used to hold the compressed form of this block and of any other
	 * blocks for which this data_vio is the compressed write agent.
	 */
	struct compressed_block *block;
};

/* Fields supporting allocation of data blocks. */
struct allocation {
	/* The physical zone in which to allocate a physical block */
	struct physical_zone *zone;
	/* The block allocated to this vio */
	physical_block_number_t pbn;
	/*
	 * If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock
	 * until the block has been written, after which it will become a read lock.
	 */
	struct pbn_lock *lock;
	/* The type of write lock to obtain on the allocated block */
	enum pbn_lock_type write_lock_type;
	/* The zone which was the start of the current allocation cycle */
	zone_count_t first_allocation_zone;
	/* Whether this vio should wait for a clean slab */
	bool wait_for_clean_slab;
};

/* The state needed to make a single reference count update for a physical block. */
struct reference_updater {
enum journal_operation operation;
bool increment;
struct zoned_pbn zpbn;
struct pbn_lock *lock;
struct vdo_waiter waiter;
};

/* A vio for processing user data requests. */
struct data_vio {
	/* The vdo_wait_queue entry structure */
	struct vdo_waiter waiter;
	/* The logical block of this request */
	struct lbn_lock logical;
	/* The state for traversing the block map tree */
	struct tree_lock tree_lock;
	/* The current partition address of this block */
	struct zoned_pbn mapped;
	/* The hash of this vio (if not zero) */
	struct uds_record_name record_name;
	/* Used for logging and debugging */
	enum async_operation_number last_async_operation;
	/* The operations to record in the recovery and slab journals */
	struct reference_updater increment_updater;
	struct reference_updater decrement_updater;
	/* Flags describing the type and state of this request */
	u16 read : 1;
	u16 write : 1;
	u16 fua : 1;
	u16 is_zero : 1;
	u16 is_discard : 1;
	u16 is_partial : 1;
	u16 is_duplicate : 1;
	u16 first_reference_operation_complete : 1;
	u16 downgrade_allocation_lock : 1;
	/* The physical block allocated for this request, if any */
	struct allocation allocation;
	/* Whether an allocation was obtained; examined by threads outside the allocation zone */
	bool allocation_succeeded;
	/* The new partition address of this block after the vio write completes */
	struct zoned_pbn new_mapped;
	/* The hash zone responsible for the name (NULL if is_zero) */
	struct hash_zone *hash_zone;
	/* The lock this vio holds or shares with other vios with the same data */
	struct hash_lock *hash_lock;
	/* All data_vios sharing a hash lock are kept in a list linking these entries */
	struct list_head hash_lock_entry;
	/* The block with the same data, if any, per the deduplication advice */
	struct zoned_pbn duplicate;
	/* The sequence number of the recovery journal block containing this vio's increment entry */
	sequence_number_t recovery_sequence_number;
	/* The point in the recovery journal where this write last made an entry */
	struct journal_point recovery_journal_point;
	/* The list entry for vios in user-initiated write requests */
	struct list_head write_entry;
	/* The flush generation to which this request belongs */
	sequence_number_t flush_generation;
	/* The completion used for fetching block map pages */
	struct vdo_page_completion page_completion;
	/* The bio from the request which is being serviced by this vio */
	struct bio *user_bio;
	/* The offset within the block of a partial request */
	block_size_t offset;
	/* The number of bytes of a discard which remain to be processed */
	u32 remaining_discard;
	/* The dedupe context for querying the UDS index, if any */
	struct dedupe_context *dedupe_context;
	/* The vio and the fields below it are not reset when a pooled data_vio is reused. */
	struct vio vio;
	/* The completion used for making reference count decrements */
	struct vdo_completion decrement_completion;
	/* All of the fields necessary for the compression path */
	struct compression_state compression;
	/* A block used as output during compression or uncompression */
	char *scratch_block;
	/* The list entry used by the data_vio pool */
	struct list_head pool_entry;
};

/* Convert a vio which is known to be a data_vio into that data_vio. */
static inline struct data_vio *vio_as_data_vio(struct vio *vio)
{
VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
return container_of(vio, struct data_vio, vio);
}

/* Convert the vdo_completion wrapping a data_vio into that data_vio. */
static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
{
return vio_as_data_vio(as_vio(completion));
}
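
/*
 * Nearly every stage of a data_vio's life cycle is a vdo_action_fn, which receives only a
 * struct vdo_completion; the usual first step is to recover the data_vio. A minimal sketch
 * (illustrative only; process_block() is a hypothetical next step, not part of this header):
 *
 *	static void my_stage(struct vdo_completion *completion)
 *	{
 *		struct data_vio *data_vio = as_data_vio(completion);
 *
 *		process_block(data_vio);
 *	}
 */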

/* Convert a data_vio's embedded waiter back into the data_vio, tolerating NULL waiters. */
static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
{
if (waiter == NULL)
return NULL;
return container_of(waiter, struct data_vio, waiter);
}

/*
 * A data_vio embeds two reference_updaters, so the updater's increment flag is used to select
 * the containing field for container_of().
 */
static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
{
if (updater->increment)
return container_of(updater, struct data_vio, increment_updater);
return container_of(updater, struct data_vio, decrement_updater);
}

/* Check whether a data_vio is currently on a flush generation list. */
static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
{
return !list_empty(&data_vio->write_entry);
}

static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
{
	return data_vio->vio.completion.vdo;
}

/* Check whether a data_vio has been assigned a physical block for its write. */
static inline bool data_vio_has_allocation(struct data_vio *data_vio)
{
	return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
}

struct data_vio_compression_status __must_check
advance_data_vio_compression_stage(struct data_vio *data_vio);
struct data_vio_compression_status __must_check
get_data_vio_compression_status(struct data_vio *data_vio);
bool cancel_data_vio_compression(struct data_vio *data_vio);

struct data_vio_pool;

int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr);
void free_data_vio_pool(struct data_vio_pool *pool);
void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios);
data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool);
data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool);
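
/*
 * A minimal lifecycle sketch for the pool API above (illustrative only; the vdo, bio, and
 * completion are assumed to exist, the sizing values are arbitrary, and error handling is
 * elided):
 *
 *	struct data_vio_pool *pool;
 *
 *	if (make_data_vio_pool(vdo, 2048, 1500, &pool) == VDO_SUCCESS) {
 *		vdo_launch_bio(pool, bio);		// pool takes over the user bio
 *		...
 *		drain_data_vio_pool(pool, completion);	// quiesce, e.g. before suspend
 *		free_data_vio_pool(pool);
 *	}
 */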

void complete_data_vio(struct vdo_completion *completion);
void handle_data_vio_error(struct vdo_completion *completion);

/* Continue processing a data_vio now that its current (successful) stage is complete. */
static inline void continue_data_vio(struct data_vio *data_vio)
{
	vdo_launch_completion(&data_vio->vio.completion);
}

/*
 * Continue processing a data_vio with the given result; if the result is an error, the
 * completion's error handler (normally handle_data_vio_error()) will take over.
 */
static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
{
	vdo_continue_completion(&data_vio->vio.completion, result);
}
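
/*
 * A minimal sketch of the two continuation paths (illustrative only; `result` comes from some
 * hypothetical preceding check):
 *
 *	if (result != VDO_SUCCESS) {
 *		continue_data_vio_with_error(data_vio, result);
 *		return;
 *	}
 *
 *	continue_data_vio(data_vio);
 */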

/* Return a human-readable name for a data_vio's last async operation (for logging and dumps). */
const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);

/*
 * The assert/set/launch helper trios below all follow the same pattern: assert that a data_vio
 * is currently running on the thread of a particular zone, set its completion callback to run on
 * that zone's thread, or set the callback and launch the completion immediately.
 */
static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->hash_zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	/*
	 * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
	 * inline, and the LBN is better than nothing as an identifier.
	 */
	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}

/*
 * Set a callback as a hash zone operation. This function presumes that the hash_zone field of
 * the data_vio has already been set.
 */
static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
						   vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->hash_zone->thread_id);
}

/* Set a callback as a hash zone operation and invoke it immediately. */
static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
						      vdo_action_fn callback)
{
	set_data_vio_hash_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->logical.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}

/* Set a callback as a logical block operation. */
static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
						 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->logical.zone->thread_id);
}

/* Set a callback as a logical block operation and invoke it immediately. */
static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_logical_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
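
/*
 * A minimal sketch of the thread-hopping style these helpers support (illustrative only;
 * finish_lookup() and continue_write() are hypothetical stages, not part of this header):
 *
 *	static void finish_lookup(struct vdo_completion *completion)
 *	{
 *		struct data_vio *data_vio = as_data_vio(completion);
 *
 *		assert_data_vio_in_logical_zone(data_vio);
 *		// ... work which must run on the logical zone thread ...
 *		launch_data_vio_hash_zone_callback(data_vio, continue_write);
 *	}
 */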

static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->allocation.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->allocation.pbn, thread_id,
			    expected);
}

/* Set a callback as a physical block operation in a data_vio's allocated zone. */
static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->allocation.zone->thread_id);
}

/*
 * Set a callback as a physical block operation in a data_vio's allocated zone and invoke it
 * immediately.
 */
static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							   vdo_action_fn callback)
{
	set_data_vio_allocated_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->duplicate.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->duplicate.pbn, thread_id,
			    expected);
}

/* Set a callback as a physical block operation in a data_vio's duplicate zone. */
static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->duplicate.zone->thread_id);
}

/*
 * Set a callback as a physical block operation in a data_vio's duplicate zone and invoke it
 * immediately.
 */
static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							   vdo_action_fn callback)
{
	set_data_vio_duplicate_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->mapped.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
}

/* Set a callback as a physical block operation in a data_vio's mapped zone. */
static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->mapped.zone->thread_id);
}

static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->new_mapped.zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
			    (unsigned long long) data_vio->new_mapped.pbn, thread_id,
			    expected);
}

/* Set a callback as a physical block operation in a data_vio's new_mapped zone. */
static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
							 vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    data_vio->new_mapped.zone->thread_id);
}

static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
{
	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on journal thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    journal_thread);
}

/* Set a callback as a journal operation. */
static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
						 vdo_action_fn callback)
{
	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
}

/* Set a callback as a journal operation and invoke it immediately. */
static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_journal_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
{
	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on packer thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    packer_thread);
}

/* Set a callback as a packer operation. */
static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
						vdo_action_fn callback)
{
	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
}

/* Set a callback as a packer operation and invoke it immediately. */
static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
						   vdo_action_fn callback)
{
	set_data_vio_packer_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}

static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
{
	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id,
			    cpu_thread);
}

/* Set a callback to run on the CPU queues. */
static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
					     vdo_action_fn callback)
{
	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;

	vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
}

/* Set a callback to run on the CPU queues and invoke it with the given priority. */
static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
						vdo_action_fn callback,
						enum vdo_completion_priority priority)
{
	set_data_vio_cpu_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
}
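
/*
 * A minimal sketch of dispatching CPU-intensive work (illustrative only; hash_data_vio() is a
 * hypothetical action, and CPU_Q_HASH_BLOCK_PRIORITY is assumed to be one of the
 * enum vdo_completion_priority values):
 *
 *	data_vio->last_async_operation = VIO_ASYNC_OP_HASH_DATA_VIO;
 *	launch_data_vio_cpu_callback(data_vio, hash_data_vio, CPU_Q_HASH_BLOCK_PRIORITY);
 */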

/* Set a callback as a bio zone operation. */
static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
						  vdo_action_fn callback)
{
	vdo_set_completion_callback(&data_vio->vio.completion, callback,
				    get_vio_bio_zone_thread_id(&data_vio->vio));
}

/*
 * Set a callback as a bio zone operation and invoke it immediately. All bio zone operations for
 * data_vios are launched with data priority.
 */
static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	set_data_vio_bio_zone_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion,
					    BIO_Q_DATA_PRIORITY);
}

/*
 * If the vdo uses a bio_ack queue, set a callback to run on it and launch the completion with
 * acknowledgment priority; otherwise, invoke the callback directly on the current thread,
 * avoiding a needless queue hop.
 */
static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	struct vdo_completion *completion = &data_vio->vio.completion;
	struct vdo *vdo = completion->vdo;

	if (!vdo_uses_bio_ack_queue(vdo)) {
		callback(completion);
		return;
	}

	vdo_set_completion_callback(completion, callback,
				    vdo->thread_config.bio_ack_thread);
	vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
}

void data_vio_allocate_data_block(struct data_vio *data_vio,
enum pbn_lock_type write_lock_type,
vdo_action_fn callback, vdo_action_fn error_handler);

void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);
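
/*
 * A minimal sketch of requesting a data block (illustrative only; write_with_allocation() and
 * handle_allocation_error() are hypothetical stages, and VIO_WRITE_LOCK is assumed to be one of
 * the enum pbn_lock_type values):
 *
 *	data_vio_allocate_data_block(data_vio, VIO_WRITE_LOCK, write_with_allocation,
 *				     handle_allocation_error);
 *
 * The callback runs once the attempt completes; data_vio_has_allocation() reports whether a
 * block was actually obtained, and release_data_vio_allocation_lock() releases any lock the
 * attempt acquired.
 */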

int __must_check uncompress_data_vio(struct data_vio *data_vio,
enum block_mapping_state mapping_state,
char *buffer);

void update_metadata_for_data_vio_write(struct data_vio *data_vio,
struct pbn_lock *lock);

void write_data_vio(struct data_vio *data_vio);

void launch_compress_data_vio(struct data_vio *data_vio);

void continue_data_vio_with_block_map_slot(struct vdo_completion *completion);

#endif /* DATA_VIO_H */