#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon.h>
/*
 * Protects every balloon's page list and isolated_pages counter; taken with
 * IRQs disabled (spin_lock_irqsave) on all paths.
 */
static DEFINE_SPINLOCK(balloon_pages_lock);
/*
 * balloon_page_insert - tag a page as ballooned and link it into @balloon.
 * @balloon: balloon device that will own the page
 * @page: page being inflated into the balloon
 *
 * Marks the page offline and, when migration is enabled, makes it discoverable
 * by the migration core: PageMovableOps is set and page_private() stores the
 * owning device so the movable_ops callbacks can find it again.
 * Caller must hold balloon_pages_lock.
 */
static void balloon_page_insert(struct balloon_dev_info *balloon,
		struct page *page)
{
	lockdep_assert_held(&balloon_pages_lock);
	__SetPageOffline(page);
	if (IS_ENABLED(CONFIG_BALLOON_MIGRATION)) {
		SetPageMovableOps(page);
		/* Back-pointer consumed by balloon_page_device(). */
		set_page_private(page, (unsigned long)balloon);
	}
	list_add(&page->lru, &balloon->pages);
}
/*
 * balloon_page_finalize - sever a page's link to its balloon device.
 * @page: page leaving the balloon (deflate or migration source)
 *
 * Clears page_private() so balloon_page_device() can no longer resolve a
 * stale device pointer for this page.  Caller must hold balloon_pages_lock.
 */
static void balloon_page_finalize(struct page *page)
{
	lockdep_assert_held(&balloon_pages_lock);
	if (IS_ENABLED(CONFIG_BALLOON_MIGRATION))
		set_page_private(page, 0);
}
/*
 * balloon_page_enqueue_one - insert and account a single inflated page.
 * @b_dev_info: balloon device receiving the page
 * @page: page to enqueue
 *
 * Links @page into the balloon and updates statistics: optionally removes it
 * from the managed page count (driver opt-in), bumps the BALLOON_INFLATE
 * vm event and the per-node NR_BALLOON_PAGES counter.
 * Caller must hold balloon_pages_lock (required by balloon_page_insert()
 * and by the non-atomic __count_vm_event()).
 */
static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
		struct page *page)
{
	balloon_page_insert(b_dev_info, page);
	if (b_dev_info->adjust_managed_page_count)
		adjust_managed_page_count(page, -1);
	__count_vm_event(BALLOON_INFLATE);
	inc_node_page_state(page, NR_BALLOON_PAGES);
}
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
struct list_head *pages)
{
struct page *page, *tmp;
unsigned long flags;
size_t n_pages = 0;
spin_lock_irqsave(&balloon_pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
/*
 * balloon_page_list_dequeue - deflate up to @n_req_pages pages.
 * @b_dev_info: balloon device to take pages from
 * @pages: list head that receives the dequeued pages
 * @n_req_pages: maximum number of pages to dequeue
 *
 * Under balloon_pages_lock, detaches pages from the balloon one at a time,
 * reverses the inflate-time accounting (managed page count opt-in,
 * BALLOON_DEFLATE event, NR_BALLOON_PAGES) and collects them on @pages.
 * Stops early if the balloon runs out of pages.
 *
 * Returns the number of pages actually dequeued (may be less than requested).
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
		struct list_head *pages, size_t n_req_pages)
{
	size_t n_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&balloon_pages_lock, flags);
	while (n_pages < n_req_pages && !list_empty(&b_dev_info->pages)) {
		struct page *page = list_first_entry(&b_dev_info->pages,
						     struct page, lru);

		list_del(&page->lru);
		if (b_dev_info->adjust_managed_page_count)
			adjust_managed_page_count(page, 1);
		balloon_page_finalize(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		dec_node_page_state(page, NR_BALLOON_PAGES);
		n_pages++;
	}
	spin_unlock_irqrestore(&balloon_pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
/*
 * balloon_page_alloc - allocate a page suitable for ballooning.
 *
 * Allocation never dips into emergency reserves, never retries hard and
 * stays silent on failure (__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN).
 * With CONFIG_BALLOON_MIGRATION the page comes from the movable zone so it
 * can later be migrated; otherwise a plain highuser page is used.
 *
 * Returns the allocated page, or NULL on failure.
 */
struct page *balloon_page_alloc(void)
{
	gfp_t gfp_flags = __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			  (IS_ENABLED(CONFIG_BALLOON_MIGRATION) ?
			   GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER);

	return alloc_page(gfp_flags);
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);
/*
 * balloon_page_enqueue - inflate a single page into the balloon.
 * @b_dev_info: balloon device receiving the page
 * @page: page to enqueue
 *
 * Locking wrapper around balloon_page_enqueue_one(); takes balloon_pages_lock
 * with IRQs disabled for the duration of the insert and accounting.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
		struct page *page)
{
	unsigned long flags;
	spin_lock_irqsave(&balloon_pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&balloon_pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
unsigned long flags;
LIST_HEAD(pages);
int n_pages;
n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);
if (n_pages != 1) {
spin_lock_irqsave(&balloon_pages_lock, flags);
if (unlikely(list_empty(&b_dev_info->pages) &&
!b_dev_info->isolated_pages))
BUG();
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return NULL;
}
return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
#ifdef CONFIG_BALLOON_MIGRATION
/*
 * balloon_page_device - resolve the balloon device owning a ballooned page.
 * @page: page whose owner to look up
 *
 * Reads back the pointer stashed in page_private() by balloon_page_insert().
 * Returns NULL once balloon_page_finalize() has cleared it.
 */
static struct balloon_dev_info *balloon_page_device(struct page *page)
{
	return (struct balloon_dev_info *)page_private(page);
}
/*
 * balloon_page_isolate - movable_ops callback to isolate a page for migration.
 * @page: ballooned page the migration core wants to move
 * @mode: isolation mode (unused here)
 *
 * Under balloon_pages_lock, re-checks that the page still belongs to a
 * balloon (page_private() may have been cleared by a concurrent deflate);
 * if so, takes it off the device's page list and counts it as isolated.
 *
 * Returns true if the page was isolated, false if it no longer belongs
 * to a balloon.
 */
static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct balloon_dev_info *b_dev_info;
	unsigned long flags;
	spin_lock_irqsave(&balloon_pages_lock, flags);
	/* Must be read under the lock: deflate clears it under the same lock. */
	b_dev_info = balloon_page_device(page);
	if (!b_dev_info) {
		spin_unlock_irqrestore(&balloon_pages_lock, flags);
		return false;
	}
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&balloon_pages_lock, flags);
	return true;
}
/*
 * balloon_page_putback - movable_ops callback to return an isolated page.
 * @page: previously isolated ballooned page that was not migrated
 *
 * Re-links the page onto its device's list and drops the isolated count.
 *
 * NOTE(review): the device pointer is read before taking balloon_pages_lock,
 * unlike balloon_page_isolate().  Presumably safe because an isolated page
 * is owned by the migration core and cannot be concurrently finalized —
 * confirm against the isolation lifecycle.
 */
static void balloon_page_putback(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;
	if (WARN_ON_ONCE(!b_dev_info))
		return;
	spin_lock_irqsave(&balloon_pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&balloon_pages_lock, flags);
}
/*
 * balloon_page_migrate - movable_ops callback to migrate a ballooned page.
 * @newpage: destination page provided by the migration core
 * @page: isolated source page
 * @mode: migration mode passed through to the driver
 *
 * Delegates the actual content move to the driver's ->migratepage().  A
 * driver return of 0 means @newpage replaced @page in the balloon; -ENOENT
 * means the driver deflated @page instead (no replacement); any other
 * negative value aborts the migration and is returned as-is.
 * On success or deflate, the source page is finalized, un-isolated and
 * released.
 *
 * Returns 0 on success/deflate, or the driver's error code.
 */
static int balloon_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;
	int rc;
	if (WARN_ON_ONCE(!b_dev_info))
		return -EAGAIN;
	rc = b_dev_info->migratepage(b_dev_info, newpage, page, mode);
	/* -ENOENT is the driver's "deflated instead" signal, handled below. */
	if (rc < 0 && rc != -ENOENT)
		return rc;
	spin_lock_irqsave(&balloon_pages_lock, flags);
	if (!rc) {
		/* Migrated: the balloon now holds a reference on @newpage. */
		get_page(newpage);
		balloon_page_insert(b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
		/*
		 * Managed-count adjustments are per-zone; only rebalance when
		 * the page actually changed zones.
		 */
		if (b_dev_info->adjust_managed_page_count &&
		    page_zone(page) != page_zone(newpage)) {
			adjust_managed_page_count(page, 1);
			adjust_managed_page_count(newpage, -1);
		}
	} else {
		/* Deflated (-ENOENT): reverse the inflate-time accounting. */
		__count_vm_event(BALLOON_DEFLATE);
		if (b_dev_info->adjust_managed_page_count)
			adjust_managed_page_count(page, 1);
	}
	b_dev_info->isolated_pages--;
	balloon_page_finalize(page);
	spin_unlock_irqrestore(&balloon_pages_lock, flags);
	/* Drop the reference the balloon held on the old page. */
	put_page(page);
	return 0;
}
/* movable_ops hooks the migration core uses for PGTY_offline balloon pages. */
static const struct movable_operations balloon_mops = {
	.migrate_page = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
/* Register the balloon movable_ops for offline pages at boot. */
static int __init balloon_init(void)
{
	return set_movable_ops(&balloon_mops, PGTY_offline);
}
core_initcall(balloon_init);
#endif