diff options
author | Vandana Salve <vsalve@nvidia.com> | 2014-06-10 14:55:32 +0530 |
---|---|---|
committer | Riham Haidar <rhaidar@nvidia.com> | 2014-06-11 17:09:22 -0700 |
commit | dfb505aca91ae0e976e4e2614aaae5e293d68f05 (patch) | |
tree | c6b881d29c7855c841f0804469e7680f37294969 /mm | |
parent | 3f06b1cb27d0b55a99854106b17b8089d61e12b3 (diff) |
mm: introduce migrate_replace_page() for migrating page to the given target
Introduce migrate_replace_page() for migrating a
page to the given target page.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
bug 1517584
Change-Id: I5f1d3bcb19ca7d9c9cf7234e8d3472a42c4f40af
Signed-off-by: Vandana Salve <vsalve@nvidia.com>
Reviewed-on: http://git-master/r/421675
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/migrate.c | 59 |
1 file changed, 59 insertions, 0 deletions
/*
 * migrate_replace_page
 *
 * The function takes one single page and a target page (newpage) and
 * tries to migrate data to the target page. The caller must ensure that
 * the source page is locked with one additional get_page() call, which
 * will be freed during the migration. The caller also must release newpage
 * if migration fails, otherwise the ownership of the newpage is taken.
 * Source page is released if migration succeeds.
 *
 * Return: error code or 0 on success.
 */
int migrate_replace_page(struct page *page, struct page *newpage)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	int ret = -EAGAIN;
	int pass;

	/*
	 * Prepare for migration; presumably drains per-CPU LRU pagevecs so
	 * the isolation attempt below can find the page on the LRU lists
	 * -- NOTE(review): confirm against migrate_prep() in this tree.
	 */
	migrate_prep();

	/* LRU manipulation must be done under the zone's LRU lock. */
	spin_lock_irqsave(&zone->lru_lock, flags);

	/*
	 * Isolate the source page from its LRU list; ISOLATE_UNEVICTABLE
	 * allows taking it even when it sits on the unevictable list.
	 * If the page is not on an LRU list (or isolation fails), bail out
	 * with -EAGAIN so the caller may retry.
	 */
	if (PageLRU(page) &&
	    __isolate_lru_page(page, ISOLATE_UNEVICTABLE) == 0) {
		struct lruvec *lruvec = mem_cgroup_page_lruvec(page, zone);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	} else {
		spin_unlock_irqrestore(&zone->lru_lock, flags);
		return -EAGAIN;
	}

	/* page is now isolated, so release additional reference */
	put_page(page);

	/* Retry the migration a bounded number of times (up to 10 passes). */
	for (pass = 0; pass < 10 && ret != 0; pass++) {
		cond_resched();

		if (page_count(page) == 1) {
			/* page was freed from under us, so we are done */
			ret = 0;
			break;
		}
		/*
		 * Force a synchronous migration attempt
		 * -- NOTE(review): the literal '1' is __unmap_and_move's
		 * force flag in this tree; verify its parameter order.
		 */
		ret = __unmap_and_move(page, newpage, 1, MIGRATE_SYNC);
	}

	if (ret == 0) {
		/* take ownership of newpage and add it to lru */
		putback_lru_page(newpage);
	} else {
		/* restore additional reference to the oldpage */
		get_page(page);
	}

	/*
	 * Return the source page to the LRU and drop the isolation
	 * reference taken above -- NOTE(review): this runs on both the
	 * success and failure paths; on success it releases the reference
	 * this function still holds on the old page, per the contract in
	 * the header comment ("Source page is released if migration
	 * succeeds"). Confirm against putback_lru_page() semantics.
	 */
	putback_lru_page(page);
	return ret;
}