#ifndef _MM_SWAP_H
#define _MM_SWAP_H
#include <linux/atomic.h>
struct mempolicy;
struct swap_iocb;
extern int page_cluster;
#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER HPAGE_PMD_NR
#define swap_entry_order(order) (order)
#else
#define SWAPFILE_CLUSTER 256
#define swap_entry_order(order) 0
#endif
extern struct swap_info_struct *swap_info[];
/*
 * Per-cluster bookkeeping for a swap device. Swap slots are grouped
 * into clusters of SWAPFILE_CLUSTER entries each.
 */
struct swap_cluster_info {
spinlock_t lock; /* protects this cluster's fields — presumably also *table; verify */
u16 count; /* number of in-use slots in this cluster — TODO confirm */
u8 flags; /* presumably one of enum swap_cluster_flags; verify */
u8 order; /* allocation order this cluster serves — TODO confirm */
atomic_long_t __rcu *table; /* RCU-protected per-slot table */
struct list_head list; /* linkage on a per-state cluster list — presumably */
};
/*
 * Lifecycle states of a swap cluster. CLUSTER_FLAG_USABLE aliases
 * CLUSTER_FLAG_FRAG; states after it presumably cannot serve new
 * allocations — verify against the allocator's list handling.
 */
enum swap_cluster_flags {
CLUSTER_FLAG_NONE = 0,
CLUSTER_FLAG_FREE,
CLUSTER_FLAG_NONFULL,
CLUSTER_FLAG_FRAG,
/* last state usable for allocation — alias of CLUSTER_FLAG_FRAG */
CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG,
CLUSTER_FLAG_FULL,
CLUSTER_FLAG_DISCARD,
CLUSTER_FLAG_MAX,
};
#ifdef CONFIG_SWAP
#include <linux/swapops.h>
#include <linux/blk_types.h>
/* Return the slot index of @entry within its containing cluster. */
static inline unsigned int swp_cluster_offset(swp_entry_t entry)
{
	unsigned long slot = swp_offset(entry);

	return slot % SWAPFILE_CLUSTER;
}
/*
 * Return the swap_info_struct for swap device @type. Caller must hold
 * a reference on the device (see the percpu_ref check below).
 */
static inline struct swap_info_struct *__swap_type_to_info(int type)
{
struct swap_info_struct *si;
si = READ_ONCE(swap_info[type]);
/* a device with zero users here indicates a missing reference */
VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users));
return si;
}
/* Look up the swap device backing @entry. */
static inline struct swap_info_struct *__swap_entry_to_info(swp_entry_t entry)
{
	int type = swp_type(entry);

	return __swap_type_to_info(type);
}
/*
 * Map slot @offset on device @si to its containing cluster. Caller
 * must hold a reference on @si and pass an in-range offset.
 */
static inline struct swap_cluster_info *__swap_offset_to_cluster(
struct swap_info_struct *si, pgoff_t offset)
{
VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users));
VM_WARN_ON_ONCE(offset >= si->max);
return &si->cluster_info[offset / SWAPFILE_CLUSTER];
}
/* Map @entry to the cluster that contains its slot. */
static inline struct swap_cluster_info *__swap_entry_to_cluster(swp_entry_t entry)
{
	struct swap_info_struct *si = __swap_entry_to_info(entry);

	return __swap_offset_to_cluster(si, swp_offset(entry));
}
/*
 * Lock the cluster that covers slot @offset on @si and return it.
 * With @irq, the lock is taken with local interrupts disabled.
 * Caller must hold a reference on @si and call from task context.
 */
static __always_inline struct swap_cluster_info *__swap_cluster_lock(
struct swap_info_struct *si, unsigned long offset, bool irq)
{
struct swap_cluster_info *ci = __swap_offset_to_cluster(si, offset);
/* presumably disallowed from interrupt/softirq context — verify */
VM_WARN_ON_ONCE(!in_task());
VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users));
if (irq)
spin_lock_irq(&ci->lock);
else
spin_lock(&ci->lock);
return ci;
}
/* Lock the cluster covering @offset; non-irq-disabling variant. */
static inline struct swap_cluster_info *swap_cluster_lock(
struct swap_info_struct *si, unsigned long offset)
{
return __swap_cluster_lock(si, offset, false);
}
/*
 * Lock the cluster backing @folio's swap entry. The folio must be
 * locked and in the swap cache — presumably this keeps folio->swap
 * stable while we dereference it; verify.
 */
static inline struct swap_cluster_info *__swap_cluster_get_and_lock(
const struct folio *folio, bool irq)
{
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio);
return __swap_cluster_lock(__swap_entry_to_info(folio->swap),
swp_offset(folio->swap), irq);
}
/* Lock @folio's cluster; non-irq-disabling variant. */
static inline struct swap_cluster_info *swap_cluster_get_and_lock(
const struct folio *folio)
{
return __swap_cluster_get_and_lock(folio, false);
}
/* Lock @folio's cluster with local interrupts disabled. */
static inline struct swap_cluster_info *swap_cluster_get_and_lock_irq(
const struct folio *folio)
{
return __swap_cluster_get_and_lock(folio, true);
}
/* Release a cluster lock taken without disabling interrupts. */
static inline void swap_cluster_unlock(struct swap_cluster_info *ci)
{
spin_unlock(&ci->lock);
}
/* Release a cluster lock taken with interrupts disabled. */
static inline void swap_cluster_unlock_irq(struct swap_cluster_info *ci)
{
spin_unlock_irq(&ci->lock);
}
int folio_alloc_swap(struct folio *folio);
int folio_dup_swap(struct folio *folio, struct page *subpage);
void folio_put_swap(struct folio *folio, struct page *subpage);
extern void swap_entries_free(struct swap_info_struct *si,
struct swap_cluster_info *ci,
unsigned long offset, unsigned int nr_pages);
int sio_pool_init(void);
struct swap_iocb;
void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
/* Submit any batched swap-in I/O held in @plug; NULL means none queued. */
static inline void swap_read_unplug(struct swap_iocb *plug)
{
if (unlikely(plug))
__swap_read_unplug(plug);
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug);
void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);
extern struct address_space swap_space __read_mostly;
/* All swap entries map to the single shared swap address_space. */
static inline struct address_space *swap_address_space(swp_entry_t entry)
{
return &swap_space;
}
/* Byte position of @entry's slot on its swap device. */
static inline loff_t swap_dev_pos(swp_entry_t entry)
{
return ((loff_t)swp_offset(entry)) << PAGE_SHIFT;
}
/*
 * Return true if @folio is in the swap cache and its naturally-aligned
 * run of swap slots covers @entry. The folio must be locked —
 * presumably to keep folio->swap stable; verify.
 */
static inline bool folio_matches_swap_entry(const struct folio *folio,
swp_entry_t entry)
{
swp_entry_t folio_entry = folio->swap;
long nr_pages = folio_nr_pages(folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
if (!folio_test_swapcache(folio))
return false;
/* a swap-cache folio's entry is aligned to its page count */
VM_WARN_ON_ONCE_FOLIO(!IS_ALIGNED(folio_entry.val, nr_pages), folio);
return folio_entry.val == round_down(entry.val, nr_pages);
}
bool swap_cache_has_folio(swp_entry_t entry);
struct folio *swap_cache_get_folio(swp_entry_t entry);
void *swap_cache_get_shadow(swp_entry_t entry);
void swap_cache_del_folio(struct folio *folio);
struct folio *swap_cache_alloc_folio(swp_entry_t entry, gfp_t gfp_flags,
struct mempolicy *mpol, pgoff_t ilx,
bool *alloced);
void __swap_cache_add_folio(struct swap_cluster_info *ci,
struct folio *folio, swp_entry_t entry);
void __swap_cache_del_folio(struct swap_cluster_info *ci,
struct folio *folio, swp_entry_t entry, void *shadow);
void __swap_cache_replace_folio(struct swap_cluster_info *ci,
struct folio *old, struct folio *new);
void __swap_cache_clear_shadow(swp_entry_t entry, int nr_ents);
void show_swap_cache_info(void);
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx);
struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
struct folio *swapin_folio(swp_entry_t entry, struct folio *folio);
void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr);
/* Flags of the swap device backing @folio's swap entry. */
static inline unsigned int folio_swap_flags(struct folio *folio)
{
return __swap_entry_to_info(folio->swap)->flags;
}
/*
 * Count how many consecutive slots starting at @entry (at most @max_nr)
 * share the first slot's zeromap state; if @is_zeromap is non-NULL,
 * report that first state through it. Caller presumably guarantees
 * [start, start + max_nr) stays within the device — TODO confirm.
 */
static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
bool *is_zeromap)
{
struct swap_info_struct *sis = __swap_entry_to_info(entry);
unsigned long start = swp_offset(entry);
unsigned long end = start + max_nr;
bool first_bit;
first_bit = test_bit(start, sis->zeromap);
if (is_zeromap)
*is_zeromap = first_bit;
if (max_nr <= 1)
return max_nr;
/* length of the run of bits equal to first_bit, capped by @end */
if (first_bit)
return find_next_zero_bit(sis->zeromap, end, start) - start;
else
return find_next_bit(sis->zeromap, end, start) - start;
}
/*
 * Count consecutive swap entries starting at @entry, up to @max_nr,
 * that have no folio in the swap cache. Stops at the first entry
 * present in the cache.
 */
static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
	int nr = 0;

	while (nr < max_nr && !swap_cache_has_folio(entry)) {
		entry.val++;
		nr++;
	}
	return nr;
}
#else
struct swap_iocb;
static inline struct swap_cluster_info *swap_cluster_lock(
struct swap_info_struct *si, pgoff_t offset, bool irq)
{
return NULL;
}
/*
 * !CONFIG_SWAP stub: no clusters exist, nothing to lock.
 * Takes const struct folio * to match the CONFIG_SWAP version, so
 * const-pointer callers build identically in both configurations.
 */
static inline struct swap_cluster_info *swap_cluster_get_and_lock(
		const struct folio *folio)
{
	return NULL;
}
/*
 * !CONFIG_SWAP stub: no clusters exist, nothing to lock.
 * Takes const struct folio * to match the CONFIG_SWAP version, so
 * const-pointer callers build identically in both configurations.
 */
static inline struct swap_cluster_info *swap_cluster_get_and_lock_irq(
		const struct folio *folio)
{
	return NULL;
}
/* !CONFIG_SWAP stub: nothing was locked, nothing to release. */
static inline void swap_cluster_unlock(struct swap_cluster_info *ci)
{
}
/* !CONFIG_SWAP stub: nothing was locked, nothing to release. */
static inline void swap_cluster_unlock_irq(struct swap_cluster_info *ci)
{
}
/* !CONFIG_SWAP stub: there are no swap devices. */
static inline struct swap_info_struct *__swap_entry_to_info(swp_entry_t entry)
{
return NULL;
}
/* !CONFIG_SWAP stub: swap can never be allocated. */
static inline int folio_alloc_swap(struct folio *folio)
{
return -EINVAL;
}
/* !CONFIG_SWAP stub: no swap entries exist to duplicate. */
static inline int folio_dup_swap(struct folio *folio, struct page *page)
{
return -EINVAL;
}
/* !CONFIG_SWAP stub: no swap reference to drop. */
static inline void folio_put_swap(struct folio *folio, struct page *page)
{
}
/* !CONFIG_SWAP stub: no swap device to read from. */
static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
}
/* !CONFIG_SWAP stub: no queued swap I/O to submit. */
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}
/* !CONFIG_SWAP stub: there is no swap address space. */
static inline struct address_space *swap_address_space(swp_entry_t entry)
{
return NULL;
}
/* !CONFIG_SWAP stub: no folio can ever match a swap entry. */
static inline bool folio_matches_swap_entry(const struct folio *folio, swp_entry_t entry)
{
return false;
}
/* !CONFIG_SWAP stub: no swap cache statistics to print. */
static inline void show_swap_cache_info(void)
{
}
/* !CONFIG_SWAP stub: swap-in readahead never produces a folio. */
static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
{
return NULL;
}
/* !CONFIG_SWAP stub: swap-in readahead never produces a folio. */
static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_fault *vmf)
{
return NULL;
}
/* !CONFIG_SWAP stub: swap-in never produces a folio. */
static inline struct folio *swapin_folio(swp_entry_t entry, struct folio *folio)
{
return NULL;
}
/* !CONFIG_SWAP stub: no readahead state to update. */
static inline void swap_update_readahead(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr)
{
}
/* !CONFIG_SWAP stub: nothing to write out; report success. */
static inline int swap_writeout(struct folio *folio,
struct swap_iocb **swap_plug)
{
return 0;
}
/* !CONFIG_SWAP stub: the swap cache never holds anything. */
static inline bool swap_cache_has_folio(swp_entry_t entry)
{
return false;
}
/* !CONFIG_SWAP stub: the swap cache never holds anything. */
static inline struct folio *swap_cache_get_folio(swp_entry_t entry)
{
return NULL;
}
/* !CONFIG_SWAP stub: no shadow entries exist. */
static inline void *swap_cache_get_shadow(swp_entry_t entry)
{
return NULL;
}
/* !CONFIG_SWAP stub: nothing in the swap cache to delete. */
static inline void swap_cache_del_folio(struct folio *folio)
{
}
/* !CONFIG_SWAP stub: nothing in the swap cache to delete. */
static inline void __swap_cache_del_folio(struct swap_cluster_info *ci,
struct folio *folio, swp_entry_t entry, void *shadow)
{
}
/* !CONFIG_SWAP stub: nothing in the swap cache to replace. */
static inline void __swap_cache_replace_folio(struct swap_cluster_info *ci,
struct folio *old, struct folio *new)
{
}
/* !CONFIG_SWAP stub: no backing swap device, so no flags. */
static inline unsigned int folio_swap_flags(struct folio *folio)
{
return 0;
}
/*
 * !CONFIG_SWAP stub: no zeromap; batch length is always 0.
 * NOTE(review): *has_zeromap is left unset — presumably callers ignore
 * it when the return value is 0; verify.
 */
static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
bool *has_zeromap)
{
return 0;
}
/* !CONFIG_SWAP stub: no swap entries exist; batch length is always 0. */
static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
return 0;
}
#endif
#endif