page_cache

Occurrences of identifiers named page_cache across several kernel subsystems. Despite the shared name these are five unrelated caches: the dm-vdo block map's struct vdo_page_cache, the UDS index volume's struct page_cache, the VDO repair path's use of the block-map cache, a per-ring struct page_frag_cache in the gve Ethernet driver, and a list-based page pool in the mt76 mt7996 driver.

dm-vdo block map, struct vdo_page_cache (drivers/md/dm-vdo/block-map.c):
struct vdo_page_cache *cache = &zone->page_cache;
save_pages(&zone->page_cache);
zone->page_cache.zone = zone;
zone->page_cache.vdo = vdo;
zone->page_cache.page_count = cache_size / map->zone_count;
zone->page_cache.stats.free_pages = zone->page_cache.page_count;
result = allocate_cache_components(&zone->page_cache);
INIT_LIST_HEAD(&zone->page_cache.lru_list);
INIT_LIST_HEAD(&zone->page_cache.outgoing_list);
struct vdo_page_cache *cache = &zone->page_cache;
&(map->zones[zone].page_cache.stats);
(zone->page_cache.outstanding_reads == 0) &&
(zone->page_cache.outstanding_writes == 0)) {
struct vdo_page_cache page_cache;
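
The cluster above is the per-zone setup path: the block map divides the configured cache size evenly across its zones, starts with every slot free, and begins with empty LRU and outgoing lists. Below is a standalone sketch of that setup with stand-in types; the real allocate_cache_components(), which allocates the page array and info headers, is elided.

#include <stdlib.h>
#include <string.h>

/* Minimal stand-ins for the kernel types; fields mirror the lines above. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

struct vdo_page_cache {
	void *vdo;                    /* owning vdo instance */
	void *zone;                   /* owning block_map_zone */
	unsigned int page_count;      /* slots in this zone's cache */
	struct { unsigned int free_pages; } stats;
	struct list_head lru_list;      /* clean pages in LRU order */
	struct list_head outgoing_list; /* pages being written out */
};

/*
 * Sketch of per-zone cache setup: the total cache_size is split evenly
 * among zones, every slot starts free, and both page lists start empty.
 */
static int initialize_zone_cache(struct vdo_page_cache *cache, void *vdo,
				 void *zone, unsigned int cache_size,
				 unsigned int zone_count)
{
	memset(cache, 0, sizeof(*cache));
	cache->vdo = vdo;
	cache->zone = zone;
	cache->page_count = cache_size / zone_count;
	cache->stats.free_pages = cache->page_count;
	INIT_LIST_HEAD(&cache->lru_list);
	INIT_LIST_HEAD(&cache->outgoing_list);
	return 0; /* VDO_SUCCESS */
}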
UDS index volume, struct page_cache (drivers/md/dm-vdo/indexer/volume.c):
static void invalidate_page(struct page_cache *cache, u32 physical_page)
invalidate_page(&volume->page_cache, first_page + i);
static inline union invalidate_counter get_invalidate_counter(struct page_cache *cache,
page = select_victim_in_cache(&volume->page_cache);
cancel_page_in_cache(&volume->page_cache, physical_page, page);
result = put_page_in_cache(&volume->page_cache, physical_page, page);
cancel_page_in_cache(&volume->page_cache, physical_page, page);
static inline void set_invalidate_counter(struct page_cache *cache,
static void begin_pending_search(struct page_cache *cache, u32 physical_page,
for (i = 0; i < volume->page_cache.indexable_pages; i++)
volume->page_cache.index[i] = volume->page_cache.cache_slots;
for (i = 0; i < volume->page_cache.cache_slots; i++)
clear_cache_page(&volume->page_cache, &volume->page_cache.cache[i]);
static void end_pending_search(struct page_cache *cache, unsigned int zone_number)
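
begin_pending_search()/end_pending_search() and the invalidate counters above implement a lightweight reader/writer handshake: each zone bumps a per-zone counter to an odd value while it searches the cache for a page, and an invalidator waits only on zones whose counter is odd for that same page. What follows is a self-contained C11 model of that protocol as I read it from these call sites; the field names follow the code, but the ZONE_COUNT constant, the atomics, and the yield loop are simplifications of the kernel's READ_ONCE/WRITE_ONCE and memory barriers.

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define ZONE_COUNT 4 /* assumed zone count for the model */

/* Per-zone counter: which page that zone is searching for, and a count
 * whose low bit means "search in flight". */
union invalidate_counter {
	uint64_t value;
	struct {
		uint32_t page;
		uint32_t counter;
	};
};

static _Atomic uint64_t search_pending_counters[ZONE_COUNT];

static union invalidate_counter get_invalidate_counter(unsigned int zone)
{
	union invalidate_counter ic;

	ic.value = atomic_load(&search_pending_counters[zone]);
	return ic;
}

static bool search_pending(union invalidate_counter ic)
{
	return (ic.counter & 1) != 0;
}

/* Reader side: only this zone's thread writes its counter, so a plain
 * load-modify-store is race-free; the count goes odd while searching. */
static void begin_pending_search(unsigned int zone, uint32_t physical_page)
{
	union invalidate_counter ic = get_invalidate_counter(zone);

	ic.page = physical_page;
	ic.counter++;
	atomic_store(&search_pending_counters[zone], ic.value);
}

/* Reader side: the count goes even again once the search is over. */
static void end_pending_search(unsigned int zone)
{
	union invalidate_counter ic = get_invalidate_counter(zone);

	ic.counter++;
	atomic_store(&search_pending_counters[zone], ic.value);
}

/* Invalidator side: snapshot every zone, then wait out only the zones that
 * were mid-search on this page. Any change to the snapshot means that reader
 * finished or moved on, so waiting for the exact value to change suffices. */
static void wait_for_pending_searches(uint32_t physical_page)
{
	union invalidate_counter initial[ZONE_COUNT];
	unsigned int i;

	for (i = 0; i < ZONE_COUNT; i++)
		initial[i] = get_invalidate_counter(i);

	for (i = 0; i < ZONE_COUNT; i++) {
		if (search_pending(initial[i]) && initial[i].page == physical_page) {
			while (get_invalidate_counter(i).value == initial[i].value)
				sched_yield();
		}
	}
}

The bracket must be opened before a probe result is used; otherwise the invalidator could retire the page between lookup and use, which is why the call sites further down pair every cache probe with this begin/end pair.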
static int __must_check initialize_page_cache(struct page_cache *cache,
result = initialize_page_cache(&volume->page_cache, geometry,
volume->cache_size += volume->page_cache.cache_slots * sizeof(struct delta_index_page);
static void uninitialize_page_cache(struct page_cache *cache)
static void wait_for_pending_searches(struct page_cache *cache, u32 physical_page)
uninitialize_page_cache(&volume->page_cache);
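
initialize_page_cache() sizes two arrays: an index with one entry per indexable volume page, and cache_slots page slots, each holding a decoded delta_index_page (which is why the cache_size accounting line above charges cache_slots * sizeof(struct delta_index_page)). A sketch of that lifecycle with stand-in types; error handling is reduced to a single bail-out, and the "no page" sentinel value is my reading of clear_cache_page().

#include <stdint.h>
#include <stdlib.h>

struct cached_page {
	uint32_t physical_page; /* volume page held in this slot, if any */
	/* decoded delta_index_page payload elided */
};

struct page_cache {
	uint32_t indexable_pages; /* total volume pages the index can map */
	uint16_t cache_slots;     /* number of cached_page slots */
	uint16_t *index;          /* physical page -> slot, or the sentinel */
	struct cached_page *cache;
};

/* Mark one slot empty; indexable_pages is assumed to be the "no page" value. */
static void clear_cache_page(struct page_cache *cache, struct cached_page *page)
{
	page->physical_page = cache->indexable_pages;
}

static void uninitialize_page_cache(struct page_cache *cache)
{
	free(cache->index);
	free(cache->cache);
	cache->index = NULL;
	cache->cache = NULL;
}

static int initialize_page_cache(struct page_cache *cache,
				 uint32_t indexable_pages, uint16_t cache_slots)
{
	uint32_t i;

	cache->indexable_pages = indexable_pages;
	cache->cache_slots = cache_slots;
	cache->index = malloc(indexable_pages * sizeof(*cache->index));
	cache->cache = malloc(cache_slots * sizeof(*cache->cache));
	if (cache->index == NULL || cache->cache == NULL) {
		uninitialize_page_cache(cache);
		return -1; /* memory-allocation error in the real code */
	}

	/* cache_slots doubles as the "not cached" sentinel in the index. */
	for (i = 0; i < cache->indexable_pages; i++)
		cache->index[i] = cache->cache_slots;
	for (i = 0; i < cache->cache_slots; i++)
		clear_cache_page(cache, &cache->cache[i]);
	return 0;
}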
static void clear_cache_page(struct page_cache *cache, struct cached_page *page)
static void make_page_most_recent(struct page_cache *cache, struct cached_page *page)
static struct cached_page *select_victim_in_cache(struct page_cache *cache)
static int put_page_in_cache(struct page_cache *cache, u32 physical_page,
static void cancel_page_in_cache(struct page_cache *cache, u32 physical_page,
static inline bool read_queue_is_full(struct page_cache *cache)
static bool enqueue_read(struct page_cache *cache, struct uds_request *request,
while (!enqueue_read(&volume->page_cache, request, physical_page)) {
static struct queued_read *reserve_read_queue_entry(struct page_cache *cache)
queue_entry = reserve_read_queue_entry(&volume->page_cache);
page = select_victim_in_cache(&volume->page_cache);
cancel_page_in_cache(&volume->page_cache, page_number, page);
cancel_page_in_cache(&volume->page_cache, page_number, page);
cancel_page_in_cache(&volume->page_cache, page_number, page);
result = put_page_in_cache(&volume->page_cache, page_number, page);
cancel_page_in_cache(&volume->page_cache, page_number, page);
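
The enqueue_read()/reserve_read_queue_entry() pairing above is a bounded producer/consumer ring: request threads queue cache misses, retrying while the ring is full, and reader threads claim the oldest entry, pick a victim slot, do the storage read, then either put_page_in_cache() on success or cancel_page_in_cache() on failure. Below is a minimal sketch of the ring itself; the real queue also deduplicates requests for the same page and runs under a mutex, both elided here, and the queue depth constant is an assumption.

#include <stdbool.h>
#include <stdint.h>

#define MAX_QUEUED_READS 4096 /* assumed queue depth */

struct queued_read {
	bool reserved;          /* claimed by a reader thread */
	uint32_t physical_page; /* page to read from storage */
	/* list of uds_requests waiting on this page elided */
};

struct read_queue {
	struct queued_read entries[MAX_QUEUED_READS];
	uint16_t first; /* oldest unserviced entry */
	uint16_t last;  /* next free entry */
};

static inline uint16_t next_position(uint16_t position)
{
	return (position + 1) % MAX_QUEUED_READS;
}

/* Full when advancing last would collide with first (one slot stays unused). */
static inline bool read_queue_is_full(struct read_queue *queue)
{
	return queue->first == next_position(queue->last);
}

/* Producer side: queue a miss for physical_page; the caller retries on false. */
static bool enqueue_read(struct read_queue *queue, uint32_t physical_page)
{
	if (read_queue_is_full(queue))
		return false;

	queue->entries[queue->last].physical_page = physical_page;
	queue->entries[queue->last].reserved = false;
	queue->last = next_position(queue->last);
	return true;
}

/* Reader-thread side: claim the oldest entry, or NULL if the queue is empty. */
static struct queued_read *reserve_read_queue_entry(struct read_queue *queue)
{
	struct queued_read *entry;

	if (queue->first == queue->last)
		return NULL;

	entry = &queue->entries[queue->first];
	entry->reserved = true;
	queue->first = next_position(queue->first);
	return entry;
}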
struct page_cache *cache = &volume->page_cache;
static void get_page_and_index(struct page_cache *cache, u32 physical_page,
static void get_page_from_cache(struct page_cache *cache, u32 physical_page,
page = select_victim_in_cache(&volume->page_cache);
cancel_page_in_cache(&volume->page_cache, physical_page, page);
cancel_page_in_cache(&volume->page_cache, physical_page, page);
result = put_page_in_cache(&volume->page_cache, physical_page, page);
cancel_page_in_cache(&volume->page_cache, physical_page, page);
get_page_from_cache(&volume->page_cache, physical_page, &page);
make_page_most_recent(&volume->page_cache, page);
get_page_from_cache(&volume->page_cache, physical_page, &page);
make_page_most_recent(&volume->page_cache, page);
end_pending_search(&volume->page_cache, zone_number);
get_page_from_cache(&volume->page_cache, physical_page, &page);
begin_pending_search(&volume->page_cache, physical_page, zone_number);
begin_pending_search(&volume->page_cache, physical_page, zone_number);
begin_pending_search(&volume->page_cache, physical_page, zone_number);
end_pending_search(&volume->page_cache, zone_number);
end_pending_search(&volume->page_cache, zone_number);
begin_pending_search(&volume->page_cache, physical_page, zone_number);
end_pending_search(&volume->page_cache, zone_number);
end_pending_search(&volume->page_cache, zone_number);
struct page_cache page_cache;
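
get_page_from_cache() resolves a physical page to a slot through the index array, with cache_slots as the miss sentinel exactly as the reset loops above establish, and a hit is followed by make_page_most_recent() so select_victim_in_cache() can later evict the least recently used slot. A sketch of the lookup and the recency bump, using a plain monotonic clock in place of the kernel's atomic64 counter:

#include <stddef.h>
#include <stdint.h>

struct cached_page {
	uint32_t physical_page;
	uint64_t last_used; /* recency stamp; the smallest value is the victim */
};

struct page_cache {
	uint32_t indexable_pages;
	uint16_t cache_slots;
	uint16_t *index;           /* physical page -> slot, or cache_slots */
	struct cached_page *cache; /* cache_slots entries */
	uint64_t clock;            /* monotonic recency counter */
};

/* Look up physical_page; *page_ptr is NULL on a miss. */
static void get_page_from_cache(struct page_cache *cache, uint32_t physical_page,
				struct cached_page **page_ptr)
{
	uint16_t slot = cache->index[physical_page];

	*page_ptr = (slot < cache->cache_slots) ? &cache->cache[slot] : NULL;
}

/* Stamp a page as just-used so eviction passes over it. */
static void make_page_most_recent(struct page_cache *cache, struct cached_page *page)
{
	page->last_used = ++cache->clock;
}

/* Pick the slot with the oldest stamp for reuse. */
static struct cached_page *select_victim_in_cache(struct page_cache *cache)
{
	struct cached_page *victim = &cache->cache[0];
	uint16_t i;

	for (i = 1; i < cache->cache_slots; i++) {
		if (cache->cache[i].last_used < victim->last_used)
			victim = &cache->cache[i];
	}
	return victim;
}

As I read the call sites, the real select_victim_in_cache() also waits out pending searches on the victim's old page before the slot is reused, which is where the counter protocol sketched earlier earns its keep.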
VDO repair, toggling the block-map cache's rebuilding flag (drivers/md/dm-vdo/repair.c):
vdo->block_map->zones[0].page_cache.rebuilding =
repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;
struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache;
gve Ethernet driver, per-ring XDP frame allocator (drivers/net/ethernet/google/gve):
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
page_frag_cache_drain(&priv->rx[i].page_cache);
frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
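
Rather than a bespoke cache, the gve lines use the kernel's generic page-fragment allocator: each RX ring owns a struct page_frag_cache from which XDP frames are carved on the hot path, and the ring drains the cache on teardown. A kernel-side sketch of that pattern follows; my_rx_ring and the helper names are stand-ins, and the header that declares page_frag_cache has moved between kernel versions.

#include <linux/gfp.h>
#include <linux/mm_types.h>

/* Per-ring fragment allocator, as in the gve RX ring above. */
struct my_rx_ring {
	struct page_frag_cache page_cache; /* backs XDP frame allocations */
};

/*
 * Hot path: carve total_len bytes for an XDP frame out of the current
 * cache page; page_frag_alloc() replaces the cache page as it fills up.
 */
static void *alloc_xdp_frame(struct my_rx_ring *rx, unsigned int total_len)
{
	return page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
}

/* Teardown: drop the cache's reference to its current page. */
static void teardown_ring(struct my_rx_ring *rx)
{
	page_frag_cache_drain(&rx->page_cache);
}

Frames handed out by page_frag_alloc() are released individually with page_frag_free(); draining only drops the cache's own reference to the page it is currently carving from.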
mt76 mt7996, WED RRO page recycling list (drivers/net/wireless/mediatek/mt76/mt7996):
INIT_LIST_HEAD(&dev->wed_rro.page_cache);
if (!list_empty(&dev->wed_rro.page_cache)) {
p = list_first_entry(&dev->wed_rro.page_cache,
list_add(&p->list, &dev->wed_rro.page_cache);
struct list_head page_cache;
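
The mt7996 WED RRO code keeps recycled pages on a plain list_head rather than a structured cache: INIT_LIST_HEAD() at setup, list_first_entry() to reuse a cached page, list_add() to return one. A kernel-side sketch of that pool shape; struct rro_page and the helper names are stand-ins for the driver's real types.

#include <linux/list.h>

/* Stand-in for the driver's real list entry type. */
struct rro_page {
	struct list_head list;
	void *buf;
};

struct rro_state {
	struct list_head page_cache; /* free pages ready for reuse */
};

static void rro_init(struct rro_state *rro)
{
	INIT_LIST_HEAD(&rro->page_cache);
}

/* Take a recycled page if one is cached; the caller allocates on NULL. */
static struct rro_page *rro_get_page(struct rro_state *rro)
{
	struct rro_page *p = NULL;

	if (!list_empty(&rro->page_cache)) {
		p = list_first_entry(&rro->page_cache, struct rro_page, list);
		list_del(&p->list);
	}
	return p;
}

/* Return a page to the cache for later reuse. */
static void rro_put_page(struct rro_state *rro, struct rro_page *p)
{
	list_add(&p->list, &rro->page_cache);
}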