author     Luka Pivk <luka.pivk@toradex.com>    2019-01-08 07:02:41 +0100
committer  Luka Pivk <luka.pivk@toradex.com>    2019-01-08 07:02:41 +0100
commit     43f61e8df71c474cd56b5c971096b9cf8d36cd95 (patch)
tree       0031be65fcc2fd2fd3eceeee9558b5526a19e431 /mm
parent     5656ec50dc9950db865d53306326b1a732e3738e (diff)
parent     c04c050f5bf98845bfe22164b8a1503d696a6e26 (diff)
Merge remote-tracking branch 'remotes/origin/linux-4.19.y' (toradex_4.19.y)
Signed-off-by: Luka Pivk <luka.pivk@toradex.com>
Diffstat (limited to 'mm')
-rw-r--r--   mm/huge_memory.c   20
-rw-r--r--   mm/page_alloc.c    19
-rw-r--r--   mm/vmscan.c         6
3 files changed, 32 insertions, 13 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 15310f14c25e..d2cd70cfaa90 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2127,23 +2127,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	 */
 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	pmd_migration = is_pmd_migration_entry(old_pmd);
-	if (pmd_migration) {
+	if (unlikely(pmd_migration)) {
 		swp_entry_t entry;
 
 		entry = pmd_to_swp_entry(old_pmd);
 		page = pfn_to_page(swp_offset(entry));
-	} else
-#endif
+		write = is_write_migration_entry(entry);
+		young = false;
+		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+	} else {
 		page = pmd_page(old_pmd);
+		if (pmd_dirty(old_pmd))
+			SetPageDirty(page);
+		write = pmd_write(old_pmd);
+		young = pmd_young(old_pmd);
+		soft_dirty = pmd_soft_dirty(old_pmd);
+	}
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	page_ref_add(page, HPAGE_PMD_NR - 1);
 
 	/*
 	 * Withdraw the table only after we mark the pmd entry invalid.
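
The hunk above moves the dirty/write/young/soft-dirty bookkeeping into the two branches: once the old PMD has been turned into a migration entry, pmd_write()/pmd_young()/pmd_soft_dirty() no longer describe it, so write and soft-dirty are recovered from the swap-entry encoding (is_write_migration_entry(), pmd_swp_soft_dirty()) and the access bit is treated as clear. A minimal userspace sketch of the same decode-per-encoding pattern; the PRESENT_*/MIGRATION_* bits and decode() are invented for illustration and are not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative encodings only: a "present" entry keeps hardware-style
 * flag bits, while a "migration" entry reuses the word for an offset
 * plus a single write bit, so reading it through the present-entry
 * accessors returns garbage.
 */
#define PRESENT_WRITE		(1u << 1)
#define PRESENT_YOUNG		(1u << 2)
#define PRESENT_DIRTY		(1u << 3)
#define MIGRATION_FLAG		(1u << 0)
#define MIGRATION_WRITE		(1u << 31)
#define MIGRATION_OFFSET(x)	((uint32_t)(x) << 1)

struct flags { bool write, young, dirty; };

static struct flags decode(uint32_t entry)
{
	struct flags f;

	if (entry & MIGRATION_FLAG) {
		/* Mirror of the migration branch: write comes from the
		 * migration encoding, the access bit is treated as clear. */
		f.write = entry & MIGRATION_WRITE;
		f.young = false;
		f.dirty = false;
	} else {
		/* Present entry: the normal flag bits are valid. */
		f.write = entry & PRESENT_WRITE;
		f.young = entry & PRESENT_YOUNG;
		f.dirty = entry & PRESENT_DIRTY;
	}
	return f;
}

int main(void)
{
	uint32_t migration = MIGRATION_FLAG | MIGRATION_WRITE | MIGRATION_OFFSET(42);
	struct flags ok = decode(migration);
	/* Misreading the same word as a present entry picks up offset
	 * bits as if they were flag bits. */
	struct flags bogus = {
		.write = migration & PRESENT_WRITE,
		.young = migration & PRESENT_YOUNG,
		.dirty = migration & PRESENT_DIRTY,
	};

	printf("decoded: write=%d young=%d dirty=%d\n", ok.write, ok.young, ok.dirty);
	printf("misread: write=%d young=%d dirty=%d\n", bogus.write, bogus.young, bogus.dirty);
	return 0;
}
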
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6a62b2421cdf..93e73ccb4dec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5538,6 +5538,18 @@ not_early:
 			cond_resched();
 		}
 	}
+#ifdef CONFIG_SPARSEMEM
+	/*
+	 * If the zone does not span the rest of the section then
+	 * we should at least initialize those pages. Otherwise we
+	 * could blow up on a poisoned page in some paths which depend
+	 * on full sections being initialized (e.g. memory hotplug).
+	 */
+	while (end_pfn % PAGES_PER_SECTION) {
+		__init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
+		end_pfn++;
+	}
+#endif
 }
 
 static void __meminit zone_init_free_lists(struct zone *zone)
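
The loop added under CONFIG_SPARSEMEM pads the zone's memmap out to the next section boundary: if end_pfn is not a multiple of PAGES_PER_SECTION, the trailing struct pages of that last section would otherwise stay uninitialized (poisoned), and paths that assume whole sections are initialized, such as memory hotplug, could trip over them. A small userspace sketch of the rounding; the PAGES_PER_SECTION value (32768, i.e. 128 MiB sections of 4 KiB pages) and the sample end_pfn are assumptions for illustration:

#include <stdio.h>

/* Assumed for illustration; the real value is architecture dependent. */
#define PAGES_PER_SECTION (1UL << 15)

int main(void)
{
	unsigned long end_pfn = 0x23a40;	/* hypothetical zone end, mid-section */
	unsigned long first = end_pfn;

	/* Same shape as the loop added above: keep initializing single
	 * pages until end_pfn reaches a section boundary. */
	while (end_pfn % PAGES_PER_SECTION) {
		/* __init_single_page(pfn_to_page(end_pfn), ...) in the kernel */
		end_pfn++;
	}

	printf("padded %lu trailing pages, section-aligned end_pfn is 0x%lx\n",
	       end_pfn - first, end_pfn);
	return 0;
}
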
@@ -7704,11 +7716,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page)) {
+			struct page *head = compound_head(page);
+			unsigned int skip_pages;
 
-			if (!hugepage_migration_supported(page_hstate(page)))
+			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;
 
-			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+			skip_pages = (1 << compound_order(head)) - (page - head);
+			iter += skip_pages - 1;
 			continue;
 		}
 
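
In has_unmovable_pages() the scan can enter a hugepage at one of its tail pages. Before this hunk, both the hugepage_migration_supported() check and the round_up() skip were computed from page itself, which may be such a tail page; the fix resolves compound_head() first and advances the iterator by exactly the pages remaining in the compound page. The arithmetic, with made-up numbers (a 1 GiB hugepage of 4 KiB base pages, entered 100 pages past its head):

#include <stdio.h>

int main(void)
{
	/* Assumed for illustration: a 1 GiB hugepage spans 1 << 18 base
	 * pages, and the scan entered it 100 pages past the head page. */
	unsigned int order = 18;
	unsigned long page_minus_head = 100;

	/* skip_pages = (1 << compound_order(head)) - (page - head) */
	unsigned long skip_pages = (1UL << order) - page_minus_head;

	/* iter += skip_pages - 1; the loop's own iter++ then lands on the
	 * first pfn past the hugepage. */
	printf("skip_pages = %lu (next pfn examined is head + %lu)\n",
	       skip_pages, page_minus_head + skip_pages);
	return 0;
}
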
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5ef7240cbcb..961401c46334 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2456,9 +2456,11 @@ out:
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
+			 * Make sure we don't miss the last page
+			 * because of a round-off error.
 			 */
-			scan = div64_u64(scan * fraction[file],
-					 denominator);
+			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+						  denominator);
 			break;
 		case SCAN_FILE:
 		case SCAN_ANON:
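
div64_u64() truncates toward zero, so when scan * fraction[file] is small relative to denominator the proportional scan target can round down, in the worst case to zero, and the last page on the list is never scanned; switching to DIV64_U64_ROUND_UP() rounds the quotient up instead, as the new comment says. A userspace stand-in for the round-up helper (div64_u64_round_up() and the numbers below are assumptions for illustration):

#include <inttypes.h>
#include <stdio.h>

/* Round-up division via the usual (x + d - 1) / d identity. */
static uint64_t div64_u64_round_up(uint64_t x, uint64_t d)
{
	return (x + d - 1) / d;
}

int main(void)
{
	/* Hypothetical values: 3 pages on the list, fraction 40 out of a
	 * denominator of 200 -> truncating division would scan 0 pages. */
	uint64_t scan = 3, fraction = 40, denominator = 200;

	printf("truncated:  %" PRIu64 "\n", (scan * fraction) / denominator);
	printf("rounded up: %" PRIu64 "\n",
	       div64_u64_round_up(scan * fraction, denominator));
	return 0;
}
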