From acfe7d74484d6ccb25a80b8ff259fceceac0473e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 25 Jul 2011 08:55:42 +0200 Subject: slab: remove one NR_CPUS dependency Reduce high order allocations in do_tune_cpucache() for some setups. (NR_CPUS=4096 -> we need 64KB) Signed-off-by: Eric Dumazet Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 1e523ed47c61..b80282aef73b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3934,7 +3934,7 @@ fail: struct ccupdate_struct { struct kmem_cache *cachep; - struct array_cache *new[NR_CPUS]; + struct array_cache *new[0]; }; static void do_ccupdate_local(void *info) @@ -3956,7 +3956,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, struct ccupdate_struct *new; int i; - new = kzalloc(sizeof(*new), gfp); + new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *), + gfp); if (!new) return -ENOMEM; -- cgit v1.2.3 From eacbbae385bf492229e84024863960d3160547c7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 Jul 2011 13:59:49 -0700 Subject: slab: use NUMA_NO_NODE Use the nice enumerated constant. Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Pekka Enberg --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index b80282aef73b..95947400702b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3403,7 +3403,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); - if (nodeid == -1) + if (nodeid == NUMA_NO_NODE) nodeid = slab_node; if (unlikely(!cachep->nodelists[nodeid])) { -- cgit v1.2.3 From c027a474a68065391c8773f6e83ed5412657e369 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 30 Jul 2011 16:35:02 +0200 Subject: oom: task->mm == NULL doesn't mean the memory was freed exit_mm() sets ->mm == NULL then it does mmput()->exit_mmap() which frees the memory. However select_bad_process() checks ->mm != NULL before TIF_MEMDIE, so it continues to kill other tasks even if we have the oom-killed task freeing its memory. Change select_bad_process() to check ->mm after TIF_MEMDIE, but skip the tasks which have already passed exit_notify() to ensure a zombie with TIF_MEMDIE set can't block oom-killer. Alternatively we could probably clear TIF_MEMDIE after exit_mmap(). Signed-off-by: Oleg Nesterov Reviewed-by: KOSAKI Motohiro Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index eafff89b3dd6..626303b52f3c 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -303,7 +303,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, do_each_thread(g, p) { unsigned int points; - if (!p->mm) + if (p->exit_state) continue; if (oom_unkillable_task(p, mem, nodemask)) continue; @@ -319,6 +319,8 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, */ if (test_tsk_thread_flag(p, TIF_MEMDIE)) return ERR_PTR(-1UL); + if (!p->mm) + continue; if (p->flags & PF_EXITING) { /* -- cgit v1.2.3 From ea8f5fb8a71fddaf5f3a17100d3247855701f732 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Wed, 13 Jul 2011 13:14:27 +0800 Subject: HWPoison: add memory_failure_queue() memory_failure() is the entry point for HWPoison memory error recovery. It must be called in process context. 
But commonly hardware memory errors are notified via MCE or NMI, so some delayed execution mechanism must be used. In MCE handler, a work queue + ring buffer mechanism is used. In addition to MCE, now APEI (ACPI Platform Error Interface) GHES (Generic Hardware Error Source) can be used to report memory errors too. To add support to APEI GHES memory recovery, a mechanism similar to that of MCE is implemented. memory_failure_queue() is the new entry point that can be called in IRQ context. The next step is to make MCE handler uses this interface too. Signed-off-by: Huang Ying Cc: Andi Kleen Cc: Wu Fengguang Cc: Andrew Morton Signed-off-by: Len Brown --- mm/memory-failure.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 740c4f52059c..2b43ba051ac9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -53,6 +53,7 @@ #include #include #include +#include #include "internal.h" int sysctl_memory_failure_early_kill __read_mostly = 0; @@ -1178,6 +1179,97 @@ void memory_failure(unsigned long pfn, int trapno) __memory_failure(pfn, trapno, 0); } +#define MEMORY_FAILURE_FIFO_ORDER 4 +#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER) + +struct memory_failure_entry { + unsigned long pfn; + int trapno; + int flags; +}; + +struct memory_failure_cpu { + DECLARE_KFIFO(fifo, struct memory_failure_entry, + MEMORY_FAILURE_FIFO_SIZE); + spinlock_t lock; + struct work_struct work; +}; + +static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu); + +/** + * memory_failure_queue - Schedule handling memory failure of a page. + * @pfn: Page Number of the corrupted page + * @trapno: Trap number reported in the signal to user space. + * @flags: Flags for memory failure handling + * + * This function is called by the low level hardware error handler + * when it detects hardware memory corruption of a page. It schedules + * the recovering of error page, including dropping pages, killing + * processes etc. + * + * The function is primarily of use for corruptions that + * happen outside the current execution context (e.g. when + * detected by a background scrubber) + * + * Can run in IRQ context. 
+ */ +void memory_failure_queue(unsigned long pfn, int trapno, int flags) +{ + struct memory_failure_cpu *mf_cpu; + unsigned long proc_flags; + struct memory_failure_entry entry = { + .pfn = pfn, + .trapno = trapno, + .flags = flags, + }; + + mf_cpu = &get_cpu_var(memory_failure_cpu); + spin_lock_irqsave(&mf_cpu->lock, proc_flags); + if (kfifo_put(&mf_cpu->fifo, &entry)) + schedule_work_on(smp_processor_id(), &mf_cpu->work); + else + pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n", + pfn); + spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); + put_cpu_var(memory_failure_cpu); +} +EXPORT_SYMBOL_GPL(memory_failure_queue); + +static void memory_failure_work_func(struct work_struct *work) +{ + struct memory_failure_cpu *mf_cpu; + struct memory_failure_entry entry = { 0, }; + unsigned long proc_flags; + int gotten; + + mf_cpu = &__get_cpu_var(memory_failure_cpu); + for (;;) { + spin_lock_irqsave(&mf_cpu->lock, proc_flags); + gotten = kfifo_get(&mf_cpu->fifo, &entry); + spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); + if (!gotten) + break; + __memory_failure(entry.pfn, entry.trapno, entry.flags); + } +} + +static int __init memory_failure_init(void) +{ + struct memory_failure_cpu *mf_cpu; + int cpu; + + for_each_possible_cpu(cpu) { + mf_cpu = &per_cpu(memory_failure_cpu, cpu); + spin_lock_init(&mf_cpu->lock); + INIT_KFIFO(mf_cpu->fifo); + INIT_WORK(&mf_cpu->work, memory_failure_work_func); + } + + return 0; +} +core_initcall(memory_failure_init); + /** * unpoison_memory - Unpoison a previously poisoned page * @pfn: Page number of the to be unpoisoned page -- cgit v1.2.3 From dd48c085c1cdf9446f92826f1fd451167fb6c2fd Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 3 Aug 2011 16:21:01 -0700 Subject: fault-injection: add ability to export fault_attr in arbitrary directory init_fault_attr_dentries() is used to export fault_attr via debugfs. But it can only export it in debugfs root directory. Per Forlin is working on mmc_fail_request which adds support to inject data errors after a completed host transfer in MMC subsystem. The fault_attr for mmc_fail_request should be defined per mmc host and export it in debugfs directory per mmc host like /sys/kernel/debug/mmc0/mmc_fail_request. init_fault_attr_dentries() doesn't help for mmc_fail_request. So this introduces fault_create_debugfs_attr() which is able to create a directory in the arbitrary directory and replace init_fault_attr_dentries(). 
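As a worked illustration of the new interface, a fault_attr owner that already has its own debugfs directory could wire it up roughly as below. This is only a sketch: struct mmc_host is a real type, but the debugfs_root, fail_attr and fail_dir fields are assumptions made for the example, not taken from the actual MMC patches.

        /* Sketch only: assumes <linux/fault-inject.h> and <linux/debugfs.h>.
         * "fail_attr", "fail_dir" and "debugfs_root" are hypothetical per-host
         * fields, used purely to illustrate fault_create_debugfs_attr().
         */
        static int mmc_fail_request_init(struct mmc_host *host)
        {
                struct dentry *dir;

                dir = fault_create_debugfs_attr("mmc_fail_request",
                                                host->debugfs_root,
                                                &host->fail_attr);
                if (IS_ERR(dir))
                        return PTR_ERR(dir);

                host->fail_dir = dir;
                return 0;
        }

        /* Teardown would mirror the error paths in the hunks below:
         * debugfs_remove_recursive(host->fail_dir);
         */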
[akpm@linux-foundation.org: extraneous semicolon, per Randy] Signed-off-by: Akinobu Mita Tested-by: Per Forlin Cc: Jens Axboe Cc: Christoph Lameter Cc: Pekka Enberg Cc: Matt Mackall Cc: Randy Dunlap Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/failslab.c | 14 +++++++------- mm/page_alloc.c | 13 +++++-------- 2 files changed, 12 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/failslab.c b/mm/failslab.c index 1ce58c201dca..0dd7b8fec71c 100644 --- a/mm/failslab.c +++ b/mm/failslab.c @@ -34,23 +34,23 @@ __setup("failslab=", setup_failslab); #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init failslab_debugfs_init(void) { + struct dentry *dir; mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; - int err; - err = init_fault_attr_dentries(&failslab.attr, "failslab"); - if (err) - return err; + dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr); + if (IS_ERR(dir)) + return PTR_ERR(dir); - if (!debugfs_create_bool("ignore-gfp-wait", mode, failslab.attr.dir, + if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, &failslab.ignore_gfp_wait)) goto fail; - if (!debugfs_create_bool("cache-filter", mode, failslab.attr.dir, + if (!debugfs_create_bool("cache-filter", mode, dir, &failslab.cache_filter)) goto fail; return 0; fail: - cleanup_fault_attr_dentries(&failslab.attr); + debugfs_remove_recursive(dir); return -ENOMEM; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1dbcf8888f14..6e8ecb6e021c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1409,14 +1409,11 @@ static int __init fail_page_alloc_debugfs(void) { mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; - int err; - err = init_fault_attr_dentries(&fail_page_alloc.attr, - "fail_page_alloc"); - if (err) - return err; - - dir = fail_page_alloc.attr.dir; + dir = fault_create_debugfs_attr("fail_page_alloc", NULL, + &fail_page_alloc.attr); + if (IS_ERR(dir)) + return PTR_ERR(dir); if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, &fail_page_alloc.ignore_gfp_wait)) @@ -1430,7 +1427,7 @@ static int __init fail_page_alloc_debugfs(void) return 0; fail: - cleanup_fault_attr_dentries(&fail_page_alloc.attr); + debugfs_remove_recursive(dir); return -ENOMEM; } -- cgit v1.2.3 From 6328650bb4d854a7dc1498d1c0048b838b0d340c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:18 -0700 Subject: radix_tree: exceptional entries and indices A patchset to extend tmpfs to MAX_LFS_FILESIZE by abandoning its peculiar swap vector, instead keeping a file's swap entries in the same radix tree as its struct page pointers: thus saving memory, and simplifying its code and locking. This patch: The radix_tree is used by several subsystems for different purposes. A major use is to store the struct page pointers of a file's pagecache for memory management. But what if mm wanted to store something other than page pointers there too? The low bit of a radix_tree entry is already used to denote an indirect pointer, for internal use, and the unlikely radix_tree_deref_retry() case. Define the next bit as denoting an exceptional entry, and supply inline functions radix_tree_exception() to return non-0 in either unlikely case, and radix_tree_exceptional_entry() to return non-0 in the second case. If a subsystem already uses radix_tree with that bit set, no problem: it does not affect internal workings at all, but is defined for the convenience of those storing well-aligned pointers in the radix_tree. 
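The radix-tree header itself is outside this mm-only extract, but from the description above the two new predicates reduce to tests on the low bits of an entry, roughly as follows (a reconstruction, not the applied hunk):

        #define RADIX_TREE_INDIRECT_PTR         1  /* low bit: internal node, or deref_retry */
        #define RADIX_TREE_EXCEPTIONAL_ENTRY    2  /* next bit: exceptional entry */
        #define RADIX_TREE_EXCEPTIONAL_SHIFT    2

        static inline int radix_tree_exceptional_entry(void *arg)
        {
                /* non-0 only for an exceptional entry */
                return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
        }

        static inline int radix_tree_exception(void *arg)
        {
                /* non-0 in either unlikely case: indirect/retry or exceptional */
                return unlikely((unsigned long)arg &
                        (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
        }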
The radix_tree_gang_lookups have an implicit assumption that the caller can deduce the offset of each entry returned e.g. by the page->index of a struct page. But that may not be feasible for some kinds of item to be stored there. radix_tree_gang_lookup_slot() allow for an optional indices argument, output array in which to return those offsets. The same could be added to other radix_tree_gang_lookups, but for now keep it to the only one for which we need it. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 867d40222ec7..b83aebfd0a00 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -840,7 +840,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, - (void ***)pages, start, nr_pages); + (void ***)pages, NULL, start, nr_pages); ret = 0; for (i = 0; i < nr_found; i++) { struct page *page; @@ -903,7 +903,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, - (void ***)pages, index, nr_pages); + (void ***)pages, NULL, index, nr_pages); ret = 0; for (i = 0; i < nr_found; i++) { struct page *page; -- cgit v1.2.3 From a2c16d6cb0e478812829ca84aeabd02e36af35eb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:19 -0700 Subject: mm: let swap use exceptional entries If swap entries are to be stored along with struct page pointers in a radix tree, they need to be distinguished as exceptional entries. Most of the handling of swap entries in radix tree will be contained in shmem.c, but a few functions in filemap.c's common code need to check for their appearance: find_get_page(), find_lock_page(), find_get_pages() and find_get_pages_contig(). So as not to slow their fast paths, tuck those checks inside the existing checks for unlikely radix_tree_deref_slot(); except for find_lock_page(), where it is an added test. And make it a BUG in find_get_pages_tag(), which is not applied to tmpfs files. A part of the reason for eliminating shmem_readpage() earlier, was to minimize the places where common code would need to allow for swap entries. The swp_entry_t known to swapfile.c must be massaged into a slightly different form when stored in the radix tree, just as it gets massaged into a pte_t when stored in page tables. In an i386 kernel this limits its information (type and page offset) to 30 bits: given 32 "types" of swapfile and 4kB pagesize, that's a maximum swapfile size of 128GB. Which is less than the 512GB we previously allowed with X86_PAE (where the swap entry can occupy the entire upper 32 bits of a pte_t), but not a new limitation on 32-bit without PAE; and there's not a new limitation on 64-bit (where swap filesize is already limited to 16TB by a 32-bit page offset). Thirty areas of 128GB is probably still enough swap for a 64GB 32-bit machine. Provide swp_to_radix_entry() and radix_to_swp_entry() conversions, and enforce filesize limit in read_swap_header(), just as for ptes. 
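The conversions themselves are added outside this mm-only extract (in the swap headers). Consistent with the 30-bit limit worked out above, they amount to a shift by the two exceptional-entry bits; a sketch, assuming the RADIX_TREE_EXCEPTIONAL_* definitions from the previous patch:

        static inline void *swp_to_radix_entry(swp_entry_t entry)
        {
                unsigned long value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;

                /* tag the value so the radix tree sees an exceptional entry */
                return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
        }

        static inline swp_entry_t radix_to_swp_entry(void *arg)
        {
                swp_entry_t entry;

                entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
                return entry;
        }

On a 32-bit kernel this is what leaves 30 bits for swap type plus offset, hence the 128GB-per-swapfile figure above.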
Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 49 +++++++++++++++++++++++++++++++------------------ mm/swapfile.c | 20 ++++++++++++-------- 2 files changed, 43 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index b83aebfd0a00..76bfb6460f57 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -714,9 +714,12 @@ repeat: page = radix_tree_deref_slot(pagep); if (unlikely(!page)) goto out; - if (radix_tree_deref_retry(page)) + if (radix_tree_exception(page)) { + if (radix_tree_exceptional_entry(page)) + goto out; + /* radix_tree_deref_retry(page) */ goto repeat; - + } if (!page_cache_get_speculative(page)) goto repeat; @@ -753,7 +756,7 @@ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) repeat: page = find_get_page(mapping, offset); - if (page) { + if (page && !radix_tree_exception(page)) { lock_page(page); /* Has the page been truncated? */ if (unlikely(page->mapping != mapping)) { @@ -849,11 +852,14 @@ repeat: if (unlikely(!page)) continue; - /* - * This can only trigger when the entry at index 0 moves out - * of or back to the root: none yet gotten, safe to restart. - */ - if (radix_tree_deref_retry(page)) { + if (radix_tree_exception(page)) { + if (radix_tree_exceptional_entry(page)) + continue; + /* + * radix_tree_deref_retry(page): + * can only trigger when entry at index 0 moves out of + * or back to root: none yet gotten, safe to restart. + */ WARN_ON(start | i); goto restart; } @@ -912,12 +918,16 @@ repeat: if (unlikely(!page)) continue; - /* - * This can only trigger when the entry at index 0 moves out - * of or back to the root: none yet gotten, safe to restart. - */ - if (radix_tree_deref_retry(page)) + if (radix_tree_exception(page)) { + if (radix_tree_exceptional_entry(page)) + break; + /* + * radix_tree_deref_retry(page): + * can only trigger when entry at index 0 moves out of + * or back to root: none yet gotten, safe to restart. + */ goto restart; + } if (!page_cache_get_speculative(page)) goto repeat; @@ -977,12 +987,15 @@ repeat: if (unlikely(!page)) continue; - /* - * This can only trigger when the entry at index 0 moves out - * of or back to the root: none yet gotten, safe to restart. - */ - if (radix_tree_deref_retry(page)) + if (radix_tree_exception(page)) { + BUG_ON(radix_tree_exceptional_entry(page)); + /* + * radix_tree_deref_retry(page): + * can only trigger when entry at index 0 moves out of + * or back to root: none yet gotten, safe to restart. + */ goto restart; + } if (!page_cache_get_speculative(page)) goto repeat; diff --git a/mm/swapfile.c b/mm/swapfile.c index 1b8c33907242..17bc224bce68 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1924,20 +1924,24 @@ static unsigned long read_swap_header(struct swap_info_struct *p, /* * Find out how many pages are allowed for a single swap - * device. There are two limiting factors: 1) the number of - * bits for the swap offset in the swp_entry_t type and - * 2) the number of bits in the a swap pte as defined by - * the different architectures. In order to find the - * largest possible bit mask a swap entry with swap type 0 + * device. There are three limiting factors: 1) the number + * of bits for the swap offset in the swp_entry_t type, and + * 2) the number of bits in the swap pte as defined by the + * the different architectures, and 3) the number of free bits + * in an exceptional radix_tree entry. 
In order to find the + * largest possible bit mask, a swap entry with swap type 0 * and swap offset ~0UL is created, encoded to a swap pte, - * decoded to a swp_entry_t again and finally the swap + * decoded to a swp_entry_t again, and finally the swap * offset is extracted. This will mask all the bits from * the initial ~0UL mask that can't be encoded in either * the swp_entry_t or the architecture definition of a - * swap pte. + * swap pte. Then the same is done for a radix_tree entry. */ maxpages = swp_offset(pte_to_swp_entry( - swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; + swp_entry_to_pte(swp_entry(0, ~0UL)))); + maxpages = swp_offset(radix_to_swp_entry( + swp_to_radix_entry(swp_entry(0, maxpages)))) + 1; + if (maxpages > swap_header->info.last_page) { maxpages = swap_header->info.last_page + 1; /* p->max is an unsigned int: don't overflow it */ -- cgit v1.2.3 From 285b2c4fdd69ea73b4762785d8c6be83b6c074a6 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:20 -0700 Subject: tmpfs: demolish old swap vector support The maximum size of a shmem/tmpfs file has been limited by the maximum size of its triple-indirect swap vector. With 4kB page size, maximum filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of that on a 64-bit kernel. (With 8kB page size, maximum filesize was just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel, MAX_LFS_FILESIZE being then more restrictive than swap vector layout.) It's a shame that tmpfs should be more restrictive than ramfs, and this limitation has now been noticed. Add another level to the swap vector? No, it became obscure and hard to maintain, once I complicated it to make use of highmem pages nine years ago: better choose another way. Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then tmpfs would never have invented its own peculiar radix tree: we would have fitted swap entries into the common radix tree instead, in much the same way as we fit swap entries into page tables. And why should each file have a separate radix tree for its pages and for its swap entries? The swap entries are required precisely where and when the pages are not. We want to put them together in a single radix tree: which can then avoid much of the locking which was needed to prevent them from being exchanged underneath us. This also avoids the waste of memory devoted to swap vectors, first in the shmem_inode itself, then at least two more pages once a file grew beyond 16 data pages (pages accounted by df and du, but not by memcg). Allocated upfront, to avoid allocation when under swapping pressure, but pure waste when CONFIG_SWAP is not set - I have never spattered around the ifdefs to prevent that, preferring this move to sharing the common radix tree instead. There are three downsides to sharing the radix tree. One, that it binds tmpfs more tightly to the rest of mm, either requiring knowledge of swap entries in radix tree there, or duplication of its code here in shmem.c. I believe that the simplications and memory savings (and probable higher performance, not yet measured) justify that. Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix nodes that cannot be freed under memory pressure - whereas before it was the less precious highmem swap vector pages that could not be freed. I'm hoping that 64-bit has now been accessible for long enough, that the highmem argument has grown much less persuasive. 
Three, that swapoff is slower than it used to be on tmpfs files, since it's using a simple generic mechanism not tailored to it: I find this noticeable, and shall want to improve, but maybe nobody else will notice. So... now remove most of the old swap vector code from shmem.c. But, for the moment, keep the simple i_direct vector of 16 pages, with simple accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation to help mark where swap needs to be handled in subsequent patches. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 782 +++++++------------------------------------------------------ 1 file changed, 84 insertions(+), 698 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 5cc21f8b4cd3..5574b00ca771 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -66,37 +66,9 @@ static struct vfsmount *shm_mnt; #include #include -/* - * The maximum size of a shmem/tmpfs file is limited by the maximum size of - * its triple-indirect swap vector - see illustration at shmem_swp_entry(). - * - * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel, - * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum - * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel, - * MAX_LFS_FILESIZE being then more restrictive than swap vector layout. - * - * We use / and * instead of shifts in the definitions below, so that the swap - * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE. - */ -#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) -#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) - -#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) -#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT) - -#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE) -#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT)) - #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) -/* info->flags needs VM_flags to handle pagein/truncate races efficiently */ -#define SHMEM_PAGEIN VM_READ -#define SHMEM_TRUNCATE VM_WRITE - -/* Definition to limit shmem_truncate's steps between cond_rescheds */ -#define LATENCY_LIMIT 64 - /* Pretend that each entry is of this size in directory's i_size */ #define BOGO_DIRENT_SIZE 20 @@ -107,7 +79,7 @@ struct shmem_xattr { char value[0]; }; -/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ +/* Flag allocation requirements to shmem_getpage */ enum sgp_type { SGP_READ, /* don't exceed i_size, don't allocate page */ SGP_CACHE, /* don't exceed i_size, may allocate page */ @@ -137,56 +109,6 @@ static inline int shmem_getpage(struct inode *inode, pgoff_t index, mapping_gfp_mask(inode->i_mapping), fault_type); } -static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) -{ - /* - * The above definition of ENTRIES_PER_PAGE, and the use of - * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE: - * might be reconsidered if it ever diverges from PAGE_SIZE. 
- * - * Mobility flags are masked out as swap vectors cannot move - */ - return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO, - PAGE_CACHE_SHIFT-PAGE_SHIFT); -} - -static inline void shmem_dir_free(struct page *page) -{ - __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT); -} - -static struct page **shmem_dir_map(struct page *page) -{ - return (struct page **)kmap_atomic(page, KM_USER0); -} - -static inline void shmem_dir_unmap(struct page **dir) -{ - kunmap_atomic(dir, KM_USER0); -} - -static swp_entry_t *shmem_swp_map(struct page *page) -{ - return (swp_entry_t *)kmap_atomic(page, KM_USER1); -} - -static inline void shmem_swp_balance_unmap(void) -{ - /* - * When passing a pointer to an i_direct entry, to code which - * also handles indirect entries and so will shmem_swp_unmap, - * we must arrange for the preempt count to remain in balance. - * What kmap_atomic of a lowmem page does depends on config - * and architecture, so pretend to kmap_atomic some lowmem page. - */ - (void) kmap_atomic(ZERO_PAGE(0), KM_USER1); -} - -static inline void shmem_swp_unmap(swp_entry_t *entry) -{ - kunmap_atomic(entry, KM_USER1); -} - static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) { return sb->s_fs_info; @@ -303,468 +225,56 @@ static void shmem_recalc_inode(struct inode *inode) } } -/** - * shmem_swp_entry - find the swap vector position in the info structure - * @info: info structure for the inode - * @index: index of the page to find - * @page: optional page to add to the structure. Has to be preset to - * all zeros - * - * If there is no space allocated yet it will return NULL when - * page is NULL, else it will use the page for the needed block, - * setting it to NULL on return to indicate that it has been used. - * - * The swap vector is organized the following way: - * - * There are SHMEM_NR_DIRECT entries directly stored in the - * shmem_inode_info structure. So small files do not need an addional - * allocation. 
- * - * For pages with index > SHMEM_NR_DIRECT there is the pointer - * i_indirect which points to a page which holds in the first half - * doubly indirect blocks, in the second half triple indirect blocks: - * - * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the - * following layout (for SHMEM_NR_DIRECT == 16): - * - * i_indirect -> dir --> 16-19 - * | +-> 20-23 - * | - * +-->dir2 --> 24-27 - * | +-> 28-31 - * | +-> 32-35 - * | +-> 36-39 - * | - * +-->dir3 --> 40-43 - * +-> 44-47 - * +-> 48-51 - * +-> 52-55 - */ -static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page) -{ - unsigned long offset; - struct page **dir; - struct page *subdir; - - if (index < SHMEM_NR_DIRECT) { - shmem_swp_balance_unmap(); - return info->i_direct+index; - } - if (!info->i_indirect) { - if (page) { - info->i_indirect = *page; - *page = NULL; - } - return NULL; /* need another page */ - } - - index -= SHMEM_NR_DIRECT; - offset = index % ENTRIES_PER_PAGE; - index /= ENTRIES_PER_PAGE; - dir = shmem_dir_map(info->i_indirect); - - if (index >= ENTRIES_PER_PAGE/2) { - index -= ENTRIES_PER_PAGE/2; - dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE; - index %= ENTRIES_PER_PAGE; - subdir = *dir; - if (!subdir) { - if (page) { - *dir = *page; - *page = NULL; - } - shmem_dir_unmap(dir); - return NULL; /* need another page */ - } - shmem_dir_unmap(dir); - dir = shmem_dir_map(subdir); - } - - dir += index; - subdir = *dir; - if (!subdir) { - if (!page || !(subdir = *page)) { - shmem_dir_unmap(dir); - return NULL; /* need a page */ - } - *dir = subdir; - *page = NULL; - } - shmem_dir_unmap(dir); - return shmem_swp_map(subdir) + offset; -} - -static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value) +static void shmem_put_swap(struct shmem_inode_info *info, pgoff_t index, + swp_entry_t swap) { - long incdec = value? 1: -1; - - entry->val = value; - info->swapped += incdec; - if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) { - struct page *page = kmap_atomic_to_page(entry); - set_page_private(page, page_private(page) + incdec); - } -} - -/** - * shmem_swp_alloc - get the position of the swap entry for the page. - * @info: info structure for the inode - * @index: index of the page to find - * @sgp: check and recheck i_size? skip allocation? - * @gfp: gfp mask to use for any page allocation - * - * If the entry does not exist, allocate it. - */ -static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, - unsigned long index, enum sgp_type sgp, gfp_t gfp) -{ - struct inode *inode = &info->vfs_inode; - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); - struct page *page = NULL; - swp_entry_t *entry; - - if (sgp != SGP_WRITE && - ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) - return ERR_PTR(-EINVAL); - - while (!(entry = shmem_swp_entry(info, index, &page))) { - if (sgp == SGP_READ) - return shmem_swp_map(ZERO_PAGE(0)); - /* - * Test used_blocks against 1 less max_blocks, since we have 1 data - * page (and perhaps indirect index pages) yet to allocate: - * a waste to allocate index if we cannot allocate data. 
- */ - if (sbinfo->max_blocks) { - if (percpu_counter_compare(&sbinfo->used_blocks, - sbinfo->max_blocks - 1) >= 0) - return ERR_PTR(-ENOSPC); - percpu_counter_inc(&sbinfo->used_blocks); - inode->i_blocks += BLOCKS_PER_PAGE; - } - - spin_unlock(&info->lock); - page = shmem_dir_alloc(gfp); - spin_lock(&info->lock); - - if (!page) { - shmem_free_blocks(inode, 1); - return ERR_PTR(-ENOMEM); - } - if (sgp != SGP_WRITE && - ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { - entry = ERR_PTR(-EINVAL); - break; - } - if (info->next_index <= index) - info->next_index = index + 1; - } - if (page) { - /* another task gave its page, or truncated the file */ - shmem_free_blocks(inode, 1); - shmem_dir_free(page); - } - if (info->next_index <= index && !IS_ERR(entry)) - info->next_index = index + 1; - return entry; -} - -/** - * shmem_free_swp - free some swap entries in a directory - * @dir: pointer to the directory - * @edir: pointer after last entry of the directory - * @punch_lock: pointer to spinlock when needed for the holepunch case - */ -static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir, - spinlock_t *punch_lock) -{ - spinlock_t *punch_unlock = NULL; - swp_entry_t *ptr; - int freed = 0; - - for (ptr = dir; ptr < edir; ptr++) { - if (ptr->val) { - if (unlikely(punch_lock)) { - punch_unlock = punch_lock; - punch_lock = NULL; - spin_lock(punch_unlock); - if (!ptr->val) - continue; - } - free_swap_and_cache(*ptr); - *ptr = (swp_entry_t){0}; - freed++; - } - } - if (punch_unlock) - spin_unlock(punch_unlock); - return freed; -} - -static int shmem_map_and_free_swp(struct page *subdir, int offset, - int limit, struct page ***dir, spinlock_t *punch_lock) -{ - swp_entry_t *ptr; - int freed = 0; - - ptr = shmem_swp_map(subdir); - for (; offset < limit; offset += LATENCY_LIMIT) { - int size = limit - offset; - if (size > LATENCY_LIMIT) - size = LATENCY_LIMIT; - freed += shmem_free_swp(ptr+offset, ptr+offset+size, - punch_lock); - if (need_resched()) { - shmem_swp_unmap(ptr); - if (*dir) { - shmem_dir_unmap(*dir); - *dir = NULL; - } - cond_resched(); - ptr = shmem_swp_map(subdir); - } - } - shmem_swp_unmap(ptr); - return freed; + if (index < SHMEM_NR_DIRECT) + info->i_direct[index] = swap; } -static void shmem_free_pages(struct list_head *next) +static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index) { - struct page *page; - int freed = 0; - - do { - page = container_of(next, struct page, lru); - next = next->next; - shmem_dir_free(page); - freed++; - if (freed >= LATENCY_LIMIT) { - cond_resched(); - freed = 0; - } - } while (next); + return (index < SHMEM_NR_DIRECT) ? 
+ info->i_direct[index] : (swp_entry_t){0}; } -void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) +void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) { + struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); - unsigned long idx; - unsigned long size; - unsigned long limit; - unsigned long stage; - unsigned long diroff; - struct page **dir; - struct page *topdir; - struct page *middir; - struct page *subdir; - swp_entry_t *ptr; - LIST_HEAD(pages_to_free); - long nr_pages_to_free = 0; - long nr_swaps_freed = 0; - int offset; - int freed; - int punch_hole; - spinlock_t *needs_lock; - spinlock_t *punch_lock; - unsigned long upper_limit; + pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + pgoff_t end = (lend >> PAGE_CACHE_SHIFT); + pgoff_t index; + swp_entry_t swap; - truncate_inode_pages_range(inode->i_mapping, start, end); + truncate_inode_pages_range(mapping, lstart, lend); - inode->i_ctime = inode->i_mtime = CURRENT_TIME; - idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (idx >= info->next_index) - return; + if (end > SHMEM_NR_DIRECT) + end = SHMEM_NR_DIRECT; spin_lock(&info->lock); - info->flags |= SHMEM_TRUNCATE; - if (likely(end == (loff_t) -1)) { - limit = info->next_index; - upper_limit = SHMEM_MAX_INDEX; - info->next_index = idx; - needs_lock = NULL; - punch_hole = 0; - } else { - if (end + 1 >= inode->i_size) { /* we may free a little more */ - limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - upper_limit = SHMEM_MAX_INDEX; - } else { - limit = (end + 1) >> PAGE_CACHE_SHIFT; - upper_limit = limit; - } - needs_lock = &info->lock; - punch_hole = 1; - } - - topdir = info->i_indirect; - if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) { - info->i_indirect = NULL; - nr_pages_to_free++; - list_add(&topdir->lru, &pages_to_free); - } - spin_unlock(&info->lock); - - if (info->swapped && idx < SHMEM_NR_DIRECT) { - ptr = info->i_direct; - size = limit; - if (size > SHMEM_NR_DIRECT) - size = SHMEM_NR_DIRECT; - nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock); - } - - /* - * If there are no indirect blocks or we are punching a hole - * below indirect blocks, nothing to be done. - */ - if (!topdir || limit <= SHMEM_NR_DIRECT) - goto done2; - - /* - * The truncation case has already dropped info->lock, and we're safe - * because i_size and next_index have already been lowered, preventing - * access beyond. But in the punch_hole case, we still need to take - * the lock when updating the swap directory, because there might be - * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or - * shmem_writepage. However, whenever we find we can remove a whole - * directory page (not at the misaligned start or end of the range), - * we first NULLify its pointer in the level above, and then have no - * need to take the lock when updating its contents: needs_lock and - * punch_lock (either pointing to info->lock or NULL) manage this. - */ - - upper_limit -= SHMEM_NR_DIRECT; - limit -= SHMEM_NR_DIRECT; - idx = (idx > SHMEM_NR_DIRECT)? 
(idx - SHMEM_NR_DIRECT): 0; - offset = idx % ENTRIES_PER_PAGE; - idx -= offset; - - dir = shmem_dir_map(topdir); - stage = ENTRIES_PER_PAGEPAGE/2; - if (idx < ENTRIES_PER_PAGEPAGE/2) { - middir = topdir; - diroff = idx/ENTRIES_PER_PAGE; - } else { - dir += ENTRIES_PER_PAGE/2; - dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE; - while (stage <= idx) - stage += ENTRIES_PER_PAGEPAGE; - middir = *dir; - if (*dir) { - diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % - ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; - if (!diroff && !offset && upper_limit >= stage) { - if (needs_lock) { - spin_lock(needs_lock); - *dir = NULL; - spin_unlock(needs_lock); - needs_lock = NULL; - } else - *dir = NULL; - nr_pages_to_free++; - list_add(&middir->lru, &pages_to_free); - } - shmem_dir_unmap(dir); - dir = shmem_dir_map(middir); - } else { - diroff = 0; - offset = 0; - idx = stage; + for (index = start; index < end; index++) { + swap = shmem_get_swap(info, index); + if (swap.val) { + free_swap_and_cache(swap); + shmem_put_swap(info, index, (swp_entry_t){0}); + info->swapped--; } } - for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) { - if (unlikely(idx == stage)) { - shmem_dir_unmap(dir); - dir = shmem_dir_map(topdir) + - ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; - while (!*dir) { - dir++; - idx += ENTRIES_PER_PAGEPAGE; - if (idx >= limit) - goto done1; - } - stage = idx + ENTRIES_PER_PAGEPAGE; - middir = *dir; - if (punch_hole) - needs_lock = &info->lock; - if (upper_limit >= stage) { - if (needs_lock) { - spin_lock(needs_lock); - *dir = NULL; - spin_unlock(needs_lock); - needs_lock = NULL; - } else - *dir = NULL; - nr_pages_to_free++; - list_add(&middir->lru, &pages_to_free); - } - shmem_dir_unmap(dir); - cond_resched(); - dir = shmem_dir_map(middir); - diroff = 0; - } - punch_lock = needs_lock; - subdir = dir[diroff]; - if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) { - if (needs_lock) { - spin_lock(needs_lock); - dir[diroff] = NULL; - spin_unlock(needs_lock); - punch_lock = NULL; - } else - dir[diroff] = NULL; - nr_pages_to_free++; - list_add(&subdir->lru, &pages_to_free); - } - if (subdir && page_private(subdir) /* has swap entries */) { - size = limit - idx; - if (size > ENTRIES_PER_PAGE) - size = ENTRIES_PER_PAGE; - freed = shmem_map_and_free_swp(subdir, - offset, size, &dir, punch_lock); - if (!dir) - dir = shmem_dir_map(middir); - nr_swaps_freed += freed; - if (offset || punch_lock) { - spin_lock(&info->lock); - set_page_private(subdir, - page_private(subdir) - freed); - spin_unlock(&info->lock); - } else - BUG_ON(page_private(subdir) != freed); - } - offset = 0; - } -done1: - shmem_dir_unmap(dir); -done2: - if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { + if (mapping->nrpages) { + spin_unlock(&info->lock); /* - * Call truncate_inode_pages again: racing shmem_unuse_inode - * may have swizzled a page in from swap since - * truncate_pagecache or generic_delete_inode did it, before we - * lowered next_index. Also, though shmem_getpage checks - * i_size before adding to cache, no recheck after: so fix the - * narrow window there too. + * A page may have meanwhile sneaked in from swap. 
*/ - truncate_inode_pages_range(inode->i_mapping, start, end); + truncate_inode_pages_range(mapping, lstart, lend); + spin_lock(&info->lock); } - spin_lock(&info->lock); - info->flags &= ~SHMEM_TRUNCATE; - info->swapped -= nr_swaps_freed; - if (nr_pages_to_free) - shmem_free_blocks(inode, nr_pages_to_free); shmem_recalc_inode(inode); spin_unlock(&info->lock); - /* - * Empty swap vector directory pages to be freed? - */ - if (!list_empty(&pages_to_free)) { - pages_to_free.prev->next = NULL; - shmem_free_pages(pages_to_free.next); - } + inode->i_ctime = inode->i_mtime = CURRENT_TIME; } EXPORT_SYMBOL_GPL(shmem_truncate_range); @@ -797,19 +307,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) if (page) unlock_page(page); } - /* - * Reset SHMEM_PAGEIN flag so that shmem_truncate can - * detect if any pages might have been added to cache - * after truncate_inode_pages. But we needn't bother - * if it's being fully truncated to zero-length: the - * nrpages check is efficient enough in that case. - */ - if (newsize) { - struct shmem_inode_info *info = SHMEM_I(inode); - spin_lock(&info->lock); - info->flags &= ~SHMEM_PAGEIN; - spin_unlock(&info->lock); - } } if (newsize != oldsize) { i_size_write(inode, newsize); @@ -859,106 +356,28 @@ static void shmem_evict_inode(struct inode *inode) end_writeback(inode); } -static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir) -{ - swp_entry_t *ptr; - - for (ptr = dir; ptr < edir; ptr++) { - if (ptr->val == entry.val) - return ptr - dir; - } - return -1; -} - static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) { - struct address_space *mapping; + struct address_space *mapping = info->vfs_inode.i_mapping; unsigned long idx; - unsigned long size; - unsigned long limit; - unsigned long stage; - struct page **dir; - struct page *subdir; - swp_entry_t *ptr; - int offset; int error; - idx = 0; - ptr = info->i_direct; - spin_lock(&info->lock); - if (!info->swapped) { - list_del_init(&info->swaplist); - goto lost2; - } - limit = info->next_index; - size = limit; - if (size > SHMEM_NR_DIRECT) - size = SHMEM_NR_DIRECT; - offset = shmem_find_swp(entry, ptr, ptr+size); - if (offset >= 0) { - shmem_swp_balance_unmap(); - goto found; - } - if (!info->i_indirect) - goto lost2; - - dir = shmem_dir_map(info->i_indirect); - stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2; - - for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) { - if (unlikely(idx == stage)) { - shmem_dir_unmap(dir-1); - if (cond_resched_lock(&info->lock)) { - /* check it has not been truncated */ - if (limit > info->next_index) { - limit = info->next_index; - if (idx >= limit) - goto lost2; - } - } - dir = shmem_dir_map(info->i_indirect) + - ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; - while (!*dir) { - dir++; - idx += ENTRIES_PER_PAGEPAGE; - if (idx >= limit) - goto lost1; - } - stage = idx + ENTRIES_PER_PAGEPAGE; - subdir = *dir; - shmem_dir_unmap(dir); - dir = shmem_dir_map(subdir); - } - subdir = *dir; - if (subdir && page_private(subdir)) { - ptr = shmem_swp_map(subdir); - size = limit - idx; - if (size > ENTRIES_PER_PAGE) - size = ENTRIES_PER_PAGE; - offset = shmem_find_swp(entry, ptr, ptr+size); - shmem_swp_unmap(ptr); - if (offset >= 0) { - shmem_dir_unmap(dir); - ptr = shmem_swp_map(subdir); - goto found; - } - } - } -lost1: - shmem_dir_unmap(dir-1); -lost2: - spin_unlock(&info->lock); + for (idx = 0; idx < SHMEM_NR_DIRECT; idx++) + if (shmem_get_swap(info, idx).val == 
entry.val) + goto found; return 0; found: - idx += offset; - ptr += offset; + spin_lock(&info->lock); + if (shmem_get_swap(info, idx).val != entry.val) { + spin_unlock(&info->lock); + return 0; + } /* * Move _head_ to start search for next from here. * But be careful: shmem_evict_inode checks list_empty without taking * mutex, and there's an instant in list_move_tail when info->swaplist - * would appear empty, if it were the only one on shmem_swaplist. We - * could avoid doing it if inode NULL; or use this minor optimization. + * would appear empty, if it were the only one on shmem_swaplist. */ if (shmem_swaplist.next != &info->swaplist) list_move_tail(&shmem_swaplist, &info->swaplist); @@ -968,19 +387,17 @@ found: * but also to hold up shmem_evict_inode(): so inode cannot be freed * beneath us (pagelock doesn't help until the page is in pagecache). */ - mapping = info->vfs_inode.i_mapping; error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); /* which does mem_cgroup_uncharge_cache_page on error */ if (error != -ENOMEM) { delete_from_swap_cache(page); set_page_dirty(page); - info->flags |= SHMEM_PAGEIN; - shmem_swp_set(info, ptr, 0); + shmem_put_swap(info, idx, (swp_entry_t){0}); + info->swapped--; swap_free(entry); error = 1; /* not an error, but entry was found */ } - shmem_swp_unmap(ptr); spin_unlock(&info->lock); return error; } @@ -1017,7 +434,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page) mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(p, next, &shmem_swaplist) { info = list_entry(p, struct shmem_inode_info, swaplist); - found = shmem_unuse_inode(info, entry, page); + if (!info->swapped) { + spin_lock(&info->lock); + if (!info->swapped) + list_del_init(&info->swaplist); + spin_unlock(&info->lock); + } + if (info->swapped) + found = shmem_unuse_inode(info, entry, page); cond_resched(); if (found) break; @@ -1041,7 +465,7 @@ out: static int shmem_writepage(struct page *page, struct writeback_control *wbc) { struct shmem_inode_info *info; - swp_entry_t *entry, swap; + swp_entry_t swap, oswap; struct address_space *mapping; unsigned long index; struct inode *inode; @@ -1067,6 +491,15 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ goto redirty; } + + /* + * Just for this patch, we have a toy implementation, + * which can swap out only the first SHMEM_NR_DIRECT pages: + * for simple demonstration of where we need to think about swap. + */ + if (index >= SHMEM_NR_DIRECT) + goto redirty; + swap = get_swap_page(); if (!swap.val) goto redirty; @@ -1087,22 +520,19 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) spin_lock(&info->lock); mutex_unlock(&shmem_swaplist_mutex); - if (index >= info->next_index) { - BUG_ON(!(info->flags & SHMEM_TRUNCATE)); - goto unlock; - } - entry = shmem_swp_entry(info, index, NULL); - if (entry->val) { + oswap = shmem_get_swap(info, index); + if (oswap.val) { WARN_ON_ONCE(1); /* Still happens? Tell us about it! 
*/ - free_swap_and_cache(*entry); - shmem_swp_set(info, entry, 0); + free_swap_and_cache(oswap); + shmem_put_swap(info, index, (swp_entry_t){0}); + info->swapped--; } shmem_recalc_inode(inode); if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { delete_from_page_cache(page); - shmem_swp_set(info, entry, swap.val); - shmem_swp_unmap(entry); + shmem_put_swap(info, index, swap); + info->swapped++; swap_shmem_alloc(swap); spin_unlock(&info->lock); BUG_ON(page_mapped(page)); @@ -1110,13 +540,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) return 0; } - shmem_swp_unmap(entry); -unlock: spin_unlock(&info->lock); - /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. - */ swapcache_free(swap, NULL); redirty: set_page_dirty(page); @@ -1230,12 +654,10 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx, struct shmem_sb_info *sbinfo; struct page *page; struct page *prealloc_page = NULL; - swp_entry_t *entry; swp_entry_t swap; int error; - int ret; - if (idx >= SHMEM_MAX_INDEX) + if (idx > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) return -EFBIG; repeat: page = find_lock_page(mapping, idx); @@ -1272,37 +694,22 @@ repeat: spin_lock(&info->lock); shmem_recalc_inode(inode); - entry = shmem_swp_alloc(info, idx, sgp, gfp); - if (IS_ERR(entry)) { - spin_unlock(&info->lock); - error = PTR_ERR(entry); - goto out; - } - swap = *entry; - + swap = shmem_get_swap(info, idx); if (swap.val) { /* Look it up and read it in.. */ page = lookup_swap_cache(swap); if (!page) { - shmem_swp_unmap(entry); spin_unlock(&info->lock); /* here we actually do the io */ if (fault_type) *fault_type |= VM_FAULT_MAJOR; page = shmem_swapin(swap, gfp, info, idx); if (!page) { - spin_lock(&info->lock); - entry = shmem_swp_alloc(info, idx, sgp, gfp); - if (IS_ERR(entry)) - error = PTR_ERR(entry); - else { - if (entry->val == swap.val) - error = -ENOMEM; - shmem_swp_unmap(entry); - } - spin_unlock(&info->lock); - if (error) + swp_entry_t nswap = shmem_get_swap(info, idx); + if (nswap.val == swap.val) { + error = -ENOMEM; goto out; + } goto repeat; } wait_on_page_locked(page); @@ -1312,14 +719,12 @@ repeat: /* We have to do this with page locked to prevent races */ if (!trylock_page(page)) { - shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_locked(page); page_cache_release(page); goto repeat; } if (PageWriteback(page)) { - shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_writeback(page); unlock_page(page); @@ -1327,7 +732,6 @@ repeat: goto repeat; } if (!PageUptodate(page)) { - shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(page); page_cache_release(page); @@ -1338,7 +742,6 @@ repeat: error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); if (error) { - shmem_swp_unmap(entry); spin_unlock(&info->lock); if (error == -ENOMEM) { /* @@ -1358,16 +761,14 @@ repeat: goto repeat; } - info->flags |= SHMEM_PAGEIN; - shmem_swp_set(info, entry, 0); - shmem_swp_unmap(entry); delete_from_swap_cache(page); + shmem_put_swap(info, idx, (swp_entry_t){0}); + info->swapped--; spin_unlock(&info->lock); set_page_dirty(page); swap_free(swap); } else if (sgp == SGP_READ) { - shmem_swp_unmap(entry); page = find_get_page(mapping, idx); if (page && !trylock_page(page)) { spin_unlock(&info->lock); @@ -1378,7 +779,6 @@ repeat: spin_unlock(&info->lock); } else if (prealloc_page) { - shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { if (percpu_counter_compare(&sbinfo->used_blocks, @@ -1393,34 
+793,24 @@ repeat: page = prealloc_page; prealloc_page = NULL; - entry = shmem_swp_alloc(info, idx, sgp, gfp); - if (IS_ERR(entry)) - error = PTR_ERR(entry); - else { - swap = *entry; - shmem_swp_unmap(entry); - } - ret = error || swap.val; - if (ret) + swap = shmem_get_swap(info, idx); + if (swap.val) mem_cgroup_uncharge_cache_page(page); else - ret = add_to_page_cache_lru(page, mapping, + error = add_to_page_cache_lru(page, mapping, idx, GFP_NOWAIT); /* * At add_to_page_cache_lru() failure, * uncharge will be done automatically. */ - if (ret) { + if (swap.val || error) { shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); spin_unlock(&info->lock); page_cache_release(page); - if (error) - goto out; goto repeat; } - info->flags |= SHMEM_PAGEIN; info->alloced++; spin_unlock(&info->lock); clear_highpage(page); @@ -2627,7 +2017,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) goto failed; sbinfo->free_inodes = sbinfo->max_inodes; - sb->s_maxbytes = SHMEM_MAX_BYTES; + sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = TMPFS_MAGIC; @@ -2863,7 +2253,7 @@ out4: void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, struct page **pagep, swp_entry_t *ent) { - swp_entry_t entry = { .val = 0 }, *ptr; + swp_entry_t entry = { .val = 0 }; struct page *page = NULL; struct shmem_inode_info *info = SHMEM_I(inode); @@ -2871,16 +2261,13 @@ void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, goto out; spin_lock(&info->lock); - ptr = shmem_swp_entry(info, pgoff, NULL); #ifdef CONFIG_SWAP - if (ptr && ptr->val) { - entry.val = ptr->val; + entry = shmem_get_swap(info, pgoff); + if (entry.val) page = find_get_page(&swapper_space, entry.val); - } else + else #endif page = find_get_page(inode->i_mapping, pgoff); - if (ptr) - shmem_swp_unmap(ptr); spin_unlock(&info->lock); out: *pagep = page; @@ -2963,7 +2350,6 @@ out: #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) #define shmem_acct_size(flags, size) 0 #define shmem_unacct_size(flags, size) do {} while (0) -#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE #endif /* CONFIG_SHMEM */ @@ -2987,7 +2373,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags if (IS_ERR(shm_mnt)) return (void *)shm_mnt; - if (size < 0 || size > SHMEM_MAX_BYTES) + if (size < 0 || size > MAX_LFS_FILESIZE) return ERR_PTR(-EINVAL); if (shmem_acct_size(flags, size)) -- cgit v1.2.3 From 41ffe5d5ceef7f7ff2ff18e320d88ca6d629efaf Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:21 -0700 Subject: tmpfs: miscellaneous trivial cleanups While it's at its least, make a number of boring nitpicky cleanups to shmem.c, mostly for consistency of variable naming. Things like "swap" instead of "entry", "pgoff_t index" instead of "unsigned long idx". And since everything else here is prefixed "shmem_", better change init_tmpfs() to shmem_init(). 
Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 216 ++++++++++++++++++++++++++++++------------------------------- 1 file changed, 107 insertions(+), 109 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 5574b00ca771..24e95ac16053 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -28,7 +28,6 @@ #include #include #include -#include #include static struct vfsmount *shm_mnt; @@ -51,6 +50,7 @@ static struct vfsmount *shm_mnt; #include #include #include +#include #include #include #include @@ -63,7 +63,6 @@ static struct vfsmount *shm_mnt; #include #include -#include #include #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) @@ -201,7 +200,7 @@ static void shmem_free_inode(struct super_block *sb) } /** - * shmem_recalc_inode - recalculate the size of an inode + * shmem_recalc_inode - recalculate the block usage of an inode * @inode: inode to recalc * * We have to calculate the free blocks since the mm can drop @@ -356,19 +355,20 @@ static void shmem_evict_inode(struct inode *inode) end_writeback(inode); } -static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) +static int shmem_unuse_inode(struct shmem_inode_info *info, + swp_entry_t swap, struct page *page) { struct address_space *mapping = info->vfs_inode.i_mapping; - unsigned long idx; + pgoff_t index; int error; - for (idx = 0; idx < SHMEM_NR_DIRECT; idx++) - if (shmem_get_swap(info, idx).val == entry.val) + for (index = 0; index < SHMEM_NR_DIRECT; index++) + if (shmem_get_swap(info, index).val == swap.val) goto found; return 0; found: spin_lock(&info->lock); - if (shmem_get_swap(info, idx).val != entry.val) { + if (shmem_get_swap(info, index).val != swap.val) { spin_unlock(&info->lock); return 0; } @@ -387,15 +387,15 @@ found: * but also to hold up shmem_evict_inode(): so inode cannot be freed * beneath us (pagelock doesn't help until the page is in pagecache). */ - error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); + error = add_to_page_cache_locked(page, mapping, index, GFP_NOWAIT); /* which does mem_cgroup_uncharge_cache_page on error */ if (error != -ENOMEM) { delete_from_swap_cache(page); set_page_dirty(page); - shmem_put_swap(info, idx, (swp_entry_t){0}); + shmem_put_swap(info, index, (swp_entry_t){0}); info->swapped--; - swap_free(entry); + swap_free(swap); error = 1; /* not an error, but entry was found */ } spin_unlock(&info->lock); @@ -405,9 +405,9 @@ found: /* * shmem_unuse() search for an eventually swapped out shmem page. 
*/ -int shmem_unuse(swp_entry_t entry, struct page *page) +int shmem_unuse(swp_entry_t swap, struct page *page) { - struct list_head *p, *next; + struct list_head *this, *next; struct shmem_inode_info *info; int found = 0; int error; @@ -432,8 +432,8 @@ int shmem_unuse(swp_entry_t entry, struct page *page) radix_tree_preload_end(); mutex_lock(&shmem_swaplist_mutex); - list_for_each_safe(p, next, &shmem_swaplist) { - info = list_entry(p, struct shmem_inode_info, swaplist); + list_for_each_safe(this, next, &shmem_swaplist) { + info = list_entry(this, struct shmem_inode_info, swaplist); if (!info->swapped) { spin_lock(&info->lock); if (!info->swapped) @@ -441,7 +441,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page) spin_unlock(&info->lock); } if (info->swapped) - found = shmem_unuse_inode(info, entry, page); + found = shmem_unuse_inode(info, swap, page); cond_resched(); if (found) break; @@ -467,7 +467,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) struct shmem_inode_info *info; swp_entry_t swap, oswap; struct address_space *mapping; - unsigned long index; + pgoff_t index; struct inode *inode; BUG_ON(!PageLocked(page)); @@ -577,35 +577,33 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) } #endif /* CONFIG_TMPFS */ -static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, - struct shmem_inode_info *info, unsigned long idx) +static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, + struct shmem_inode_info *info, pgoff_t index) { struct mempolicy mpol, *spol; struct vm_area_struct pvma; - struct page *page; spol = mpol_cond_copy(&mpol, - mpol_shared_policy_lookup(&info->policy, idx)); + mpol_shared_policy_lookup(&info->policy, index)); /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; - pvma.vm_pgoff = idx; + pvma.vm_pgoff = index; pvma.vm_ops = NULL; pvma.vm_policy = spol; - page = swapin_readahead(entry, gfp, &pvma, 0); - return page; + return swapin_readahead(swap, gfp, &pvma, 0); } static struct page *shmem_alloc_page(gfp_t gfp, - struct shmem_inode_info *info, unsigned long idx) + struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; - pvma.vm_pgoff = idx; + pvma.vm_pgoff = index; pvma.vm_ops = NULL; - pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); /* * alloc_page_vma() will drop the shared policy reference @@ -614,19 +612,19 @@ static struct page *shmem_alloc_page(gfp_t gfp, } #else /* !CONFIG_NUMA */ #ifdef CONFIG_TMPFS -static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) +static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) { } #endif /* CONFIG_TMPFS */ -static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, - struct shmem_inode_info *info, unsigned long idx) +static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, + struct shmem_inode_info *info, pgoff_t index) { - return swapin_readahead(entry, gfp, NULL, 0); + return swapin_readahead(swap, gfp, NULL, 0); } static inline struct page *shmem_alloc_page(gfp_t gfp, - struct shmem_inode_info *info, unsigned long idx) + struct shmem_inode_info *info, pgoff_t index) { return alloc_page(gfp); } @@ -646,7 +644,7 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) * vm. 
If we swap it in we mark it dirty since we also free the swap * entry since a page cannot live in both the swap and page cache */ -static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx, +static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) { struct address_space *mapping = inode->i_mapping; @@ -657,10 +655,10 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx, swp_entry_t swap; int error; - if (idx > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) + if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) return -EFBIG; repeat: - page = find_lock_page(mapping, idx); + page = find_lock_page(mapping, index); if (page) { /* * Once we can get the page lock, it must be uptodate: @@ -681,7 +679,7 @@ repeat: radix_tree_preload_end(); if (sgp != SGP_READ && !prealloc_page) { - prealloc_page = shmem_alloc_page(gfp, info, idx); + prealloc_page = shmem_alloc_page(gfp, info, index); if (prealloc_page) { SetPageSwapBacked(prealloc_page); if (mem_cgroup_cache_charge(prealloc_page, @@ -694,7 +692,7 @@ repeat: spin_lock(&info->lock); shmem_recalc_inode(inode); - swap = shmem_get_swap(info, idx); + swap = shmem_get_swap(info, index); if (swap.val) { /* Look it up and read it in.. */ page = lookup_swap_cache(swap); @@ -703,9 +701,9 @@ repeat: /* here we actually do the io */ if (fault_type) *fault_type |= VM_FAULT_MAJOR; - page = shmem_swapin(swap, gfp, info, idx); + page = shmem_swapin(swap, gfp, info, index); if (!page) { - swp_entry_t nswap = shmem_get_swap(info, idx); + swp_entry_t nswap = shmem_get_swap(info, index); if (nswap.val == swap.val) { error = -ENOMEM; goto out; @@ -740,7 +738,7 @@ repeat: } error = add_to_page_cache_locked(page, mapping, - idx, GFP_NOWAIT); + index, GFP_NOWAIT); if (error) { spin_unlock(&info->lock); if (error == -ENOMEM) { @@ -762,14 +760,14 @@ repeat: } delete_from_swap_cache(page); - shmem_put_swap(info, idx, (swp_entry_t){0}); + shmem_put_swap(info, index, (swp_entry_t){0}); info->swapped--; spin_unlock(&info->lock); set_page_dirty(page); swap_free(swap); } else if (sgp == SGP_READ) { - page = find_get_page(mapping, idx); + page = find_get_page(mapping, index); if (page && !trylock_page(page)) { spin_unlock(&info->lock); wait_on_page_locked(page); @@ -793,12 +791,12 @@ repeat: page = prealloc_page; prealloc_page = NULL; - swap = shmem_get_swap(info, idx); + swap = shmem_get_swap(info, index); if (swap.val) mem_cgroup_uncharge_cache_page(page); else error = add_to_page_cache_lru(page, mapping, - idx, GFP_NOWAIT); + index, GFP_NOWAIT); /* * At add_to_page_cache_lru() failure, * uncharge will be done automatically. @@ -841,7 +839,7 @@ nospace: * but must also avoid reporting a spurious ENOSPC while working on a * full tmpfs. 
*/ - page = find_get_page(mapping, idx); + page = find_get_page(mapping, index); spin_unlock(&info->lock); if (page) { page_cache_release(page); @@ -872,20 +870,20 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } #ifdef CONFIG_NUMA -static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) +static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) { - struct inode *i = vma->vm_file->f_path.dentry->d_inode; - return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); } static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, unsigned long addr) { - struct inode *i = vma->vm_file->f_path.dentry->d_inode; - unsigned long idx; + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + pgoff_t index; - idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); + index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); } #endif @@ -1016,7 +1014,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ { struct inode *inode = filp->f_path.dentry->d_inode; struct address_space *mapping = inode->i_mapping; - unsigned long index, offset; + pgoff_t index; + unsigned long offset; enum sgp_type sgp = SGP_READ; /* @@ -1032,7 +1031,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ for (;;) { struct page *page = NULL; - unsigned long end_index, nr, ret; + pgoff_t end_index; + unsigned long nr, ret; loff_t i_size = i_size_read(inode); end_index = i_size >> PAGE_CACHE_SHIFT; @@ -1270,8 +1270,9 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_namelen = NAME_MAX; if (sbinfo->max_blocks) { buf->f_blocks = sbinfo->max_blocks; - buf->f_bavail = buf->f_bfree = - sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks); + buf->f_bavail = + buf->f_bfree = sbinfo->max_blocks - + percpu_counter_sum(&sbinfo->used_blocks); } if (sbinfo->max_inodes) { buf->f_files = sbinfo->max_inodes; @@ -1480,8 +1481,8 @@ static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *n static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) { struct page *page = NULL; - int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); - nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); + int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); + nd_set_link(nd, error ? 
ERR_PTR(error) : kmap(page)); if (page) unlock_page(page); return page; @@ -1592,7 +1593,6 @@ out: return err; } - static const struct xattr_handler *shmem_xattr_handlers[] = { #ifdef CONFIG_TMPFS_POSIX_ACL &generic_acl_access_handler, @@ -2052,14 +2052,14 @@ static struct kmem_cache *shmem_inode_cachep; static struct inode *shmem_alloc_inode(struct super_block *sb) { - struct shmem_inode_info *p; - p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); - if (!p) + struct shmem_inode_info *info; + info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); + if (!info) return NULL; - return &p->vfs_inode; + return &info->vfs_inode; } -static void shmem_i_callback(struct rcu_head *head) +static void shmem_destroy_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); @@ -2072,25 +2072,24 @@ static void shmem_destroy_inode(struct inode *inode) /* only struct inode is valid if it's an inline symlink */ mpol_free_shared_policy(&SHMEM_I(inode)->policy); } - call_rcu(&inode->i_rcu, shmem_i_callback); + call_rcu(&inode->i_rcu, shmem_destroy_callback); } -static void init_once(void *foo) +static void shmem_init_inode(void *foo) { - struct shmem_inode_info *p = (struct shmem_inode_info *) foo; - - inode_init_once(&p->vfs_inode); + struct shmem_inode_info *info = foo; + inode_init_once(&info->vfs_inode); } -static int init_inodecache(void) +static int shmem_init_inodecache(void) { shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", sizeof(struct shmem_inode_info), - 0, SLAB_PANIC, init_once); + 0, SLAB_PANIC, shmem_init_inode); return 0; } -static void destroy_inodecache(void) +static void shmem_destroy_inodecache(void) { kmem_cache_destroy(shmem_inode_cachep); } @@ -2187,21 +2186,20 @@ static const struct vm_operations_struct shmem_vm_ops = { #endif }; - static struct dentry *shmem_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, shmem_fill_super); } -static struct file_system_type tmpfs_fs_type = { +static struct file_system_type shmem_fs_type = { .owner = THIS_MODULE, .name = "tmpfs", .mount = shmem_mount, .kill_sb = kill_litter_super, }; -int __init init_tmpfs(void) +int __init shmem_init(void) { int error; @@ -2209,18 +2207,18 @@ int __init init_tmpfs(void) if (error) goto out4; - error = init_inodecache(); + error = shmem_init_inodecache(); if (error) goto out3; - error = register_filesystem(&tmpfs_fs_type); + error = register_filesystem(&shmem_fs_type); if (error) { printk(KERN_ERR "Could not register tmpfs\n"); goto out2; } - shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, - tmpfs_fs_type.name, NULL); + shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER, + shmem_fs_type.name, NULL); if (IS_ERR(shm_mnt)) { error = PTR_ERR(shm_mnt); printk(KERN_ERR "Could not kern_mount tmpfs\n"); @@ -2229,9 +2227,9 @@ int __init init_tmpfs(void) return 0; out1: - unregister_filesystem(&tmpfs_fs_type); + unregister_filesystem(&shmem_fs_type); out2: - destroy_inodecache(); + shmem_destroy_inodecache(); out3: bdi_destroy(&shmem_backing_dev_info); out4: @@ -2241,37 +2239,37 @@ out4: #ifdef CONFIG_CGROUP_MEM_RES_CTLR /** - * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file + * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file * @inode: the inode to be searched - * @pgoff: the offset to be searched + * @index: the page offset to be searched * @pagep: the pointer for the found 
page to be stored - * @ent: the pointer for the found swap entry to be stored + * @swapp: the pointer for the found swap entry to be stored * * If a page is found, refcount of it is incremented. Callers should handle * these refcount. */ -void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, - struct page **pagep, swp_entry_t *ent) +void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, + struct page **pagep, swp_entry_t *swapp) { - swp_entry_t entry = { .val = 0 }; - struct page *page = NULL; struct shmem_inode_info *info = SHMEM_I(inode); + struct page *page = NULL; + swp_entry_t swap = {0}; - if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) + if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) goto out; spin_lock(&info->lock); #ifdef CONFIG_SWAP - entry = shmem_get_swap(info, pgoff); - if (entry.val) - page = find_get_page(&swapper_space, entry.val); + swap = shmem_get_swap(info, index); + if (swap.val) + page = find_get_page(&swapper_space, swap.val); else #endif - page = find_get_page(inode->i_mapping, pgoff); + page = find_get_page(inode->i_mapping, index); spin_unlock(&info->lock); out: *pagep = page; - *ent = entry; + *swapp = swap; } #endif @@ -2288,23 +2286,23 @@ out: #include -static struct file_system_type tmpfs_fs_type = { +static struct file_system_type shmem_fs_type = { .name = "tmpfs", .mount = ramfs_mount, .kill_sb = kill_litter_super, }; -int __init init_tmpfs(void) +int __init shmem_init(void) { - BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); + BUG_ON(register_filesystem(&shmem_fs_type) != 0); - shm_mnt = kern_mount(&tmpfs_fs_type); + shm_mnt = kern_mount(&shmem_fs_type); BUG_ON(IS_ERR(shm_mnt)); return 0; } -int shmem_unuse(swp_entry_t entry, struct page *page) +int shmem_unuse(swp_entry_t swap, struct page *page) { return 0; } @@ -2314,34 +2312,34 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) return 0; } -void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) +void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) { - truncate_inode_pages_range(inode->i_mapping, start, end); + truncate_inode_pages_range(inode->i_mapping, lstart, lend); } EXPORT_SYMBOL_GPL(shmem_truncate_range); #ifdef CONFIG_CGROUP_MEM_RES_CTLR /** - * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file + * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file * @inode: the inode to be searched - * @pgoff: the offset to be searched + * @index: the page offset to be searched * @pagep: the pointer for the found page to be stored - * @ent: the pointer for the found swap entry to be stored + * @swapp: the pointer for the found swap entry to be stored * * If a page is found, refcount of it is incremented. Callers should handle * these refcount. 
*/ -void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, - struct page **pagep, swp_entry_t *ent) +void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, + struct page **pagep, swp_entry_t *swapp) { struct page *page = NULL; - if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) + if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) goto out; - page = find_get_page(inode->i_mapping, pgoff); + page = find_get_page(inode->i_mapping, index); out: *pagep = page; - *ent = (swp_entry_t){ .val = 0 }; + *swapp = (swp_entry_t){0}; } #endif -- cgit v1.2.3 From bda97eab0cc9c6385b9f26abdda6459f630f4513 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:21 -0700 Subject: tmpfs: copy truncate_inode_pages_range Bring truncate.c's code for truncate_inode_pages_range() inline into shmem_truncate_range(), replacing its first call (there's a followup call below, but leave that one, it will disappear next). Don't play with it yet, apart from leaving out the cleancache flush, and (importantly) the nrpages == 0 skip, and moving shmem_setattr()'s partial page preparation into its partial page handling. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 79 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 24e95ac16053..e101c211ed1f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -50,6 +50,7 @@ static struct vfsmount *shm_mnt; #include #include #include +#include #include #include #include @@ -242,11 +243,88 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); pgoff_t end = (lend >> PAGE_CACHE_SHIFT); + struct pagevec pvec; pgoff_t index; swp_entry_t swap; + int i; + + BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); + + pagevec_init(&pvec, 0); + index = start; + while (index <= end && pagevec_lookup(&pvec, mapping, index, + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { + mem_cgroup_uncharge_start(); + for (i = 0; i < pagevec_count(&pvec); i++) { + struct page *page = pvec.pages[i]; + + /* We rely upon deletion not changing page->index */ + index = page->index; + if (index > end) + break; + + if (!trylock_page(page)) + continue; + WARN_ON(page->index != index); + if (PageWriteback(page)) { + unlock_page(page); + continue; + } + truncate_inode_page(mapping, page); + unlock_page(page); + } + pagevec_release(&pvec); + mem_cgroup_uncharge_end(); + cond_resched(); + index++; + } + + if (partial) { + struct page *page = NULL; + shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); + if (page) { + zero_user_segment(page, partial, PAGE_CACHE_SIZE); + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + } + } + + index = start; + for ( ; ; ) { + cond_resched(); + if (!pagevec_lookup(&pvec, mapping, index, + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { + if (index == start) + break; + index = start; + continue; + } + if (index == start && pvec.pages[0]->index > end) { + pagevec_release(&pvec); + break; + } + mem_cgroup_uncharge_start(); + for (i = 0; i < pagevec_count(&pvec); i++) { + struct page *page = pvec.pages[i]; - truncate_inode_pages_range(mapping, lstart, lend); + /* We rely upon 
deletion not changing page->index */ + index = page->index; + if (index > end) + break; + + lock_page(page); + WARN_ON(page->index != index); + wait_on_page_writeback(page); + truncate_inode_page(mapping, page); + unlock_page(page); + } + pagevec_release(&pvec); + mem_cgroup_uncharge_end(); + index++; + } if (end > SHMEM_NR_DIRECT) end = SHMEM_NR_DIRECT; @@ -289,24 +367,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { loff_t oldsize = inode->i_size; loff_t newsize = attr->ia_size; - struct page *page = NULL; - if (newsize < oldsize) { - /* - * If truncating down to a partial page, then - * if that page is already allocated, hold it - * in memory until the truncation is over, so - * truncate_partial_page cannot miss it were - * it assigned to swap. - */ - if (newsize & (PAGE_CACHE_SIZE-1)) { - (void) shmem_getpage(inode, - newsize >> PAGE_CACHE_SHIFT, - &page, SGP_READ, NULL); - if (page) - unlock_page(page); - } - } if (newsize != oldsize) { i_size_write(inode, newsize); inode->i_ctime = inode->i_mtime = CURRENT_TIME; @@ -318,8 +379,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) /* unmap again to remove racily COWed private pages */ unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); } - if (page) - page_cache_release(page); } setattr_copy(inode, attr); -- cgit v1.2.3 From 7a5d0fbb29936fad7f17b1cb001b0c33a5f13328 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:22 -0700 Subject: tmpfs: convert shmem_truncate_range to radix-swap Disable the toy swapping implementation in shmem_writepage() - it's hard to support two schemes at once - and convert shmem_truncate_range() to a lockless gang lookup of swap entries along with pages, freeing both. Since the second loop tightens its noose until all entries of either kind have been squeezed out (and we shall make sure that there's not an instant when neither is visible), there is no longer a need for yet another pass below. shmem_radix_tree_replace() compensates for the lockless lookup by checking that the expected entry is in place, under lock, before replacing it. Here it just deletes, but will be used in later patches to substitute swap entry for page or page for swap entry. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 192 ++++++++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 146 insertions(+), 46 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index e101c211ed1f..4439b7d55819 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -238,6 +238,111 @@ static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index) info->i_direct[index] : (swp_entry_t){0}; } +/* + * Replace item expected in radix tree by a new item, while holding tree lock. + */ +static int shmem_radix_tree_replace(struct address_space *mapping, + pgoff_t index, void *expected, void *replacement) +{ + void **pslot; + void *item = NULL; + + VM_BUG_ON(!expected); + pslot = radix_tree_lookup_slot(&mapping->page_tree, index); + if (pslot) + item = radix_tree_deref_slot_protected(pslot, + &mapping->tree_lock); + if (item != expected) + return -ENOENT; + if (replacement) + radix_tree_replace_slot(pslot, replacement); + else + radix_tree_delete(&mapping->page_tree, index); + return 0; +} + +/* + * Like find_get_pages, but collecting swap entries as well as pages. 
+ */ +static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping, + pgoff_t start, unsigned int nr_pages, + struct page **pages, pgoff_t *indices) +{ + unsigned int i; + unsigned int ret; + unsigned int nr_found; + + rcu_read_lock(); +restart: + nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, + (void ***)pages, indices, start, nr_pages); + ret = 0; + for (i = 0; i < nr_found; i++) { + struct page *page; +repeat: + page = radix_tree_deref_slot((void **)pages[i]); + if (unlikely(!page)) + continue; + if (radix_tree_exception(page)) { + if (radix_tree_exceptional_entry(page)) + goto export; + /* radix_tree_deref_retry(page) */ + goto restart; + } + if (!page_cache_get_speculative(page)) + goto repeat; + + /* Has the page moved? */ + if (unlikely(page != *((void **)pages[i]))) { + page_cache_release(page); + goto repeat; + } +export: + indices[ret] = indices[i]; + pages[ret] = page; + ret++; + } + if (unlikely(!ret && nr_found)) + goto restart; + rcu_read_unlock(); + return ret; +} + +/* + * Remove swap entry from radix tree, free the swap and its page cache. + */ +static int shmem_free_swap(struct address_space *mapping, + pgoff_t index, void *radswap) +{ + int error; + + spin_lock_irq(&mapping->tree_lock); + error = shmem_radix_tree_replace(mapping, index, radswap, NULL); + spin_unlock_irq(&mapping->tree_lock); + if (!error) + free_swap_and_cache(radix_to_swp_entry(radswap)); + return error; +} + +/* + * Pagevec may contain swap entries, so shuffle up pages before releasing. + */ +static void shmem_pagevec_release(struct pagevec *pvec) +{ + int i, j; + + for (i = 0, j = 0; i < pagevec_count(pvec); i++) { + struct page *page = pvec->pages[i]; + if (!radix_tree_exceptional_entry(page)) + pvec->pages[j++] = page; + } + pvec->nr = j; + pagevec_release(pvec); +} + +/* + * Remove range of pages and swap entries from radix tree, and free them. 
+ */ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) { struct address_space *mapping = inode->i_mapping; @@ -246,36 +351,44 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); pgoff_t end = (lend >> PAGE_CACHE_SHIFT); struct pagevec pvec; + pgoff_t indices[PAGEVEC_SIZE]; + long nr_swaps_freed = 0; pgoff_t index; - swp_entry_t swap; int i; BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); pagevec_init(&pvec, 0); index = start; - while (index <= end && pagevec_lookup(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { + while (index <= end) { + pvec.nr = shmem_find_get_pages_and_swap(mapping, index, + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, + pvec.pages, indices); + if (!pvec.nr) + break; mem_cgroup_uncharge_start(); for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; - /* We rely upon deletion not changing page->index */ - index = page->index; + index = indices[i]; if (index > end) break; - if (!trylock_page(page)) + if (radix_tree_exceptional_entry(page)) { + nr_swaps_freed += !shmem_free_swap(mapping, + index, page); continue; - WARN_ON(page->index != index); - if (PageWriteback(page)) { - unlock_page(page); + } + + if (!trylock_page(page)) continue; + if (page->mapping == mapping) { + VM_BUG_ON(PageWriteback(page)); + truncate_inode_page(mapping, page); } - truncate_inode_page(mapping, page); unlock_page(page); } - pagevec_release(&pvec); + shmem_pagevec_release(&pvec); mem_cgroup_uncharge_end(); cond_resched(); index++; @@ -295,59 +408,47 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) index = start; for ( ; ; ) { cond_resched(); - if (!pagevec_lookup(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { + pvec.nr = shmem_find_get_pages_and_swap(mapping, index, + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, + pvec.pages, indices); + if (!pvec.nr) { if (index == start) break; index = start; continue; } - if (index == start && pvec.pages[0]->index > end) { - pagevec_release(&pvec); + if (index == start && indices[0] > end) { + shmem_pagevec_release(&pvec); break; } mem_cgroup_uncharge_start(); for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; - /* We rely upon deletion not changing page->index */ - index = page->index; + index = indices[i]; if (index > end) break; + if (radix_tree_exceptional_entry(page)) { + nr_swaps_freed += !shmem_free_swap(mapping, + index, page); + continue; + } + lock_page(page); - WARN_ON(page->index != index); - wait_on_page_writeback(page); - truncate_inode_page(mapping, page); + if (page->mapping == mapping) { + VM_BUG_ON(PageWriteback(page)); + truncate_inode_page(mapping, page); + } unlock_page(page); } - pagevec_release(&pvec); + shmem_pagevec_release(&pvec); mem_cgroup_uncharge_end(); index++; } - if (end > SHMEM_NR_DIRECT) - end = SHMEM_NR_DIRECT; - spin_lock(&info->lock); - for (index = start; index < end; index++) { - swap = shmem_get_swap(info, index); - if (swap.val) { - free_swap_and_cache(swap); - shmem_put_swap(info, index, (swp_entry_t){0}); - info->swapped--; - } - } - - if (mapping->nrpages) { - spin_unlock(&info->lock); - /* - * A page may have meanwhile sneaked in from swap. 
- */ - truncate_inode_pages_range(mapping, lstart, lend); - spin_lock(&info->lock); - } - + info->swapped -= nr_swaps_freed; shmem_recalc_inode(inode); spin_unlock(&info->lock); @@ -552,11 +653,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) } /* - * Just for this patch, we have a toy implementation, - * which can swap out only the first SHMEM_NR_DIRECT pages: - * for simple demonstration of where we need to think about swap. + * Disable even the toy swapping implementation, while we convert + * functions one by one to having swap entries in the radix tree. */ - if (index >= SHMEM_NR_DIRECT) + if (index < ULONG_MAX) goto redirty; swap = get_swap_page(); -- cgit v1.2.3 From 46f65ec15c6878a2b4a49f6e01b20b201b46a9e4 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:23 -0700 Subject: tmpfs: convert shmem_unuse_inode to radix-swap Convert shmem_unuse_inode() to use a lockless gang lookup of the radix tree, searching for matching swap. This is somewhat slower than the old method: because of repeated radix tree descents, because of copying entries up, but probably most because the old method noted and skipped once a vector page was cleared of swap. Perhaps we can devise a use of radix tree tagging to achieve that later. shmem_add_to_page_cache() uses shmem_radix_tree_replace() to compensate for the lockless lookup by checking that the expected entry is in place, under lock. It is not very satisfactory to be copying this much from add_to_page_cache_locked(), but I think easier to sell than insisting that every caller of add_to_page_cache*() go through the extras. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 133 +++++++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 107 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 4439b7d55819..174f97188e8a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -261,6 +261,55 @@ static int shmem_radix_tree_replace(struct address_space *mapping, return 0; } +/* + * Like add_to_page_cache_locked, but error if expected item has gone. + */ +static int shmem_add_to_page_cache(struct page *page, + struct address_space *mapping, + pgoff_t index, gfp_t gfp, void *expected) +{ + int error; + + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(!PageSwapBacked(page)); + + error = mem_cgroup_cache_charge(page, current->mm, + gfp & GFP_RECLAIM_MASK); + if (error) + goto out; + if (!expected) + error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); + if (!error) { + page_cache_get(page); + page->mapping = mapping; + page->index = index; + + spin_lock_irq(&mapping->tree_lock); + if (!expected) + error = radix_tree_insert(&mapping->page_tree, + index, page); + else + error = shmem_radix_tree_replace(mapping, index, + expected, page); + if (!error) { + mapping->nrpages++; + __inc_zone_page_state(page, NR_FILE_PAGES); + __inc_zone_page_state(page, NR_SHMEM); + spin_unlock_irq(&mapping->tree_lock); + } else { + page->mapping = NULL; + spin_unlock_irq(&mapping->tree_lock); + page_cache_release(page); + } + if (!expected) + radix_tree_preload_end(); + } + if (error) + mem_cgroup_uncharge_cache_page(page); +out: + return error; +} + /* * Like find_get_pages, but collecting swap entries as well as pages. */ @@ -308,6 +357,42 @@ export: return ret; } +/* + * Lockless lookup of swap entry in radix tree, avoiding refcount on pages. 
+ */ +static pgoff_t shmem_find_swap(struct address_space *mapping, void *radswap) +{ + void **slots[PAGEVEC_SIZE]; + pgoff_t indices[PAGEVEC_SIZE]; + unsigned int nr_found; + +restart: + nr_found = 1; + indices[0] = -1; + while (nr_found) { + pgoff_t index = indices[nr_found - 1] + 1; + unsigned int i; + + rcu_read_lock(); + nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, + slots, indices, index, PAGEVEC_SIZE); + for (i = 0; i < nr_found; i++) { + void *item = radix_tree_deref_slot(slots[i]); + if (radix_tree_deref_retry(item)) { + rcu_read_unlock(); + goto restart; + } + if (item == radswap) { + rcu_read_unlock(); + return indices[i]; + } + } + rcu_read_unlock(); + cond_resched(); + } + return -1; +} + /* * Remove swap entry from radix tree, free the swap and its page cache. */ @@ -515,23 +600,21 @@ static void shmem_evict_inode(struct inode *inode) end_writeback(inode); } +/* + * If swap found in inode, free it and move page from swapcache to filecache. + */ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t swap, struct page *page) { struct address_space *mapping = info->vfs_inode.i_mapping; + void *radswap; pgoff_t index; int error; - for (index = 0; index < SHMEM_NR_DIRECT; index++) - if (shmem_get_swap(info, index).val == swap.val) - goto found; - return 0; -found: - spin_lock(&info->lock); - if (shmem_get_swap(info, index).val != swap.val) { - spin_unlock(&info->lock); + radswap = swp_to_radix_entry(swap); + index = shmem_find_swap(mapping, radswap); + if (index == -1) return 0; - } /* * Move _head_ to start search for next from here. @@ -547,23 +630,30 @@ found: * but also to hold up shmem_evict_inode(): so inode cannot be freed * beneath us (pagelock doesn't help until the page is in pagecache). */ - error = add_to_page_cache_locked(page, mapping, index, GFP_NOWAIT); + error = shmem_add_to_page_cache(page, mapping, index, + GFP_NOWAIT, radswap); /* which does mem_cgroup_uncharge_cache_page on error */ if (error != -ENOMEM) { + /* + * Truncation and eviction use free_swap_and_cache(), which + * only does trylock page: if we raced, best clean up here. + */ delete_from_swap_cache(page); set_page_dirty(page); - shmem_put_swap(info, index, (swp_entry_t){0}); - info->swapped--; - swap_free(swap); + if (!error) { + spin_lock(&info->lock); + info->swapped--; + spin_unlock(&info->lock); + swap_free(swap); + } error = 1; /* not an error, but entry was found */ } - spin_unlock(&info->lock); return error; } /* - * shmem_unuse() search for an eventually swapped out shmem page. + * Search through swapped inodes to find and replace swap by page. */ int shmem_unuse(swp_entry_t swap, struct page *page) { @@ -576,20 +666,12 @@ int shmem_unuse(swp_entry_t swap, struct page *page) * Charge page using GFP_KERNEL while we can wait, before taking * the shmem_swaplist_mutex which might hold up shmem_writepage(). * Charged back to the user (not to caller) when swap account is used. - * add_to_page_cache() will be called with GFP_NOWAIT. + * shmem_add_to_page_cache() will be called with GFP_NOWAIT. */ error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); if (error) goto out; - /* - * Try to preload while we can wait, to not make a habit of - * draining atomic reserves; but don't latch on to this cpu, - * it's okay if sometimes we get rescheduled after this. 
- */ - error = radix_tree_preload(GFP_KERNEL); - if (error) - goto uncharge; - radix_tree_preload_end(); + /* No radix_tree_preload: swap entry keeps a place for page in tree */ mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(this, next, &shmem_swaplist) { @@ -608,7 +690,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page) } mutex_unlock(&shmem_swaplist_mutex); -uncharge: if (!found) mem_cgroup_uncharge_cache_page(page); if (found < 0) -- cgit v1.2.3 From 54af60421822bb9cb664dd5cd7aac46c01ccfcf8 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:24 -0700 Subject: tmpfs: convert shmem_getpage_gfp to radix-swap Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or swap entry returned from radix tree by find_lock_page(). Whereas the repetitive old method proceeded mainly under info->lock, dropping and repeating whenever one of the conditions needed was not met, now we can proceed without it, leaving shmem_add_to_page_cache() to check for a race. This way there is no need to preallocate a page, no need for an early radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback(). Move the error unwinding down to the bottom instead of repeating it throughout. ENOSPC handling is a little different from before: there is no longer any race between find_lock_page() and finding swap, but we can arrive at ENOSPC before calling shmem_recalc_inode(), which might occasionally discover freed space. Be stricter to check i_size before returning. info->lock is used for little but alloced, swapped, i_blocks updates. Move i_blocks updates out from under the max_blocks check, so even an unlimited size=0 mount can show accurate du. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 259 ++++++++++++++++++++++++++----------------------------------- 1 file changed, 112 insertions(+), 147 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 174f97188e8a..92f01d7cc150 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -166,15 +166,6 @@ static struct backing_dev_info shmem_backing_dev_info __read_mostly = { static LIST_HEAD(shmem_swaplist); static DEFINE_MUTEX(shmem_swaplist_mutex); -static void shmem_free_blocks(struct inode *inode, long pages) -{ - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); - if (sbinfo->max_blocks) { - percpu_counter_add(&sbinfo->used_blocks, -pages); - inode->i_blocks -= pages*BLOCKS_PER_PAGE; - } -} - static int shmem_reserve_inode(struct super_block *sb) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); @@ -219,9 +210,12 @@ static void shmem_recalc_inode(struct inode *inode) freed = info->alloced - info->swapped - inode->i_mapping->nrpages; if (freed > 0) { + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + if (sbinfo->max_blocks) + percpu_counter_add(&sbinfo->used_blocks, -freed); info->alloced -= freed; + inode->i_blocks -= freed * BLOCKS_PER_PAGE; shmem_unacct_blocks(info->flags, freed); - shmem_free_blocks(inode, freed); } } @@ -888,205 +882,180 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) { struct address_space *mapping = inode->i_mapping; - struct shmem_inode_info *info = SHMEM_I(inode); + struct shmem_inode_info *info; struct shmem_sb_info *sbinfo; struct page *page; - struct page *prealloc_page = NULL; swp_entry_t swap; int error; + int once = 0; if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) return -EFBIG; repeat: + swap.val = 0; page 
= find_lock_page(mapping, index); - if (page) { + if (radix_tree_exceptional_entry(page)) { + swap = radix_to_swp_entry(page); + page = NULL; + } + + if (sgp != SGP_WRITE && + ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { + error = -EINVAL; + goto failed; + } + + if (page || (sgp == SGP_READ && !swap.val)) { /* * Once we can get the page lock, it must be uptodate: * if there were an error in reading back from swap, * the page would not be inserted into the filecache. */ - BUG_ON(!PageUptodate(page)); - goto done; + BUG_ON(page && !PageUptodate(page)); + *pagep = page; + return 0; } /* - * Try to preload while we can wait, to not make a habit of - * draining atomic reserves; but don't latch on to this cpu. + * Fast cache lookup did not find it: + * bring it back from swap or allocate. */ - error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); - if (error) - goto out; - radix_tree_preload_end(); - - if (sgp != SGP_READ && !prealloc_page) { - prealloc_page = shmem_alloc_page(gfp, info, index); - if (prealloc_page) { - SetPageSwapBacked(prealloc_page); - if (mem_cgroup_cache_charge(prealloc_page, - current->mm, GFP_KERNEL)) { - page_cache_release(prealloc_page); - prealloc_page = NULL; - } - } - } + info = SHMEM_I(inode); + sbinfo = SHMEM_SB(inode->i_sb); - spin_lock(&info->lock); - shmem_recalc_inode(inode); - swap = shmem_get_swap(info, index); if (swap.val) { /* Look it up and read it in.. */ page = lookup_swap_cache(swap); if (!page) { - spin_unlock(&info->lock); /* here we actually do the io */ if (fault_type) *fault_type |= VM_FAULT_MAJOR; page = shmem_swapin(swap, gfp, info, index); if (!page) { - swp_entry_t nswap = shmem_get_swap(info, index); - if (nswap.val == swap.val) { - error = -ENOMEM; - goto out; - } - goto repeat; + error = -ENOMEM; + goto failed; } - wait_on_page_locked(page); - page_cache_release(page); - goto repeat; } /* We have to do this with page locked to prevent races */ - if (!trylock_page(page)) { - spin_unlock(&info->lock); - wait_on_page_locked(page); - page_cache_release(page); - goto repeat; - } - if (PageWriteback(page)) { - spin_unlock(&info->lock); - wait_on_page_writeback(page); - unlock_page(page); - page_cache_release(page); - goto repeat; - } + lock_page(page); if (!PageUptodate(page)) { - spin_unlock(&info->lock); - unlock_page(page); - page_cache_release(page); error = -EIO; - goto out; + goto failed; } - - error = add_to_page_cache_locked(page, mapping, - index, GFP_NOWAIT); - if (error) { - spin_unlock(&info->lock); - if (error == -ENOMEM) { - /* - * reclaim from proper memory cgroup and - * call memcg's OOM if needed. 
- */ - error = mem_cgroup_shmem_charge_fallback( - page, current->mm, gfp); - if (error) { - unlock_page(page); - page_cache_release(page); - goto out; - } - } - unlock_page(page); - page_cache_release(page); - goto repeat; + wait_on_page_writeback(page); + + /* Someone may have already done it for us */ + if (page->mapping) { + if (page->mapping == mapping && + page->index == index) + goto done; + error = -EEXIST; + goto failed; } - delete_from_swap_cache(page); - shmem_put_swap(info, index, (swp_entry_t){0}); + error = shmem_add_to_page_cache(page, mapping, index, + gfp, swp_to_radix_entry(swap)); + if (error) + goto failed; + + spin_lock(&info->lock); info->swapped--; + shmem_recalc_inode(inode); spin_unlock(&info->lock); + + delete_from_swap_cache(page); set_page_dirty(page); swap_free(swap); - } else if (sgp == SGP_READ) { - page = find_get_page(mapping, index); - if (page && !trylock_page(page)) { - spin_unlock(&info->lock); - wait_on_page_locked(page); - page_cache_release(page); - goto repeat; + } else { + if (shmem_acct_block(info->flags)) { + error = -ENOSPC; + goto failed; } - spin_unlock(&info->lock); - - } else if (prealloc_page) { - sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { if (percpu_counter_compare(&sbinfo->used_blocks, - sbinfo->max_blocks) >= 0 || - shmem_acct_block(info->flags)) - goto nospace; + sbinfo->max_blocks) >= 0) { + error = -ENOSPC; + goto unacct; + } percpu_counter_inc(&sbinfo->used_blocks); - inode->i_blocks += BLOCKS_PER_PAGE; - } else if (shmem_acct_block(info->flags)) - goto nospace; - - page = prealloc_page; - prealloc_page = NULL; + } - swap = shmem_get_swap(info, index); - if (swap.val) - mem_cgroup_uncharge_cache_page(page); - else - error = add_to_page_cache_lru(page, mapping, - index, GFP_NOWAIT); - /* - * At add_to_page_cache_lru() failure, - * uncharge will be done automatically. - */ - if (swap.val || error) { - shmem_unacct_blocks(info->flags, 1); - shmem_free_blocks(inode, 1); - spin_unlock(&info->lock); - page_cache_release(page); - goto repeat; + page = shmem_alloc_page(gfp, info, index); + if (!page) { + error = -ENOMEM; + goto decused; } + SetPageSwapBacked(page); + __set_page_locked(page); + error = shmem_add_to_page_cache(page, mapping, index, + gfp, NULL); + if (error) + goto decused; + lru_cache_add_anon(page); + + spin_lock(&info->lock); info->alloced++; + inode->i_blocks += BLOCKS_PER_PAGE; + shmem_recalc_inode(inode); spin_unlock(&info->lock); + clear_highpage(page); flush_dcache_page(page); SetPageUptodate(page); if (sgp == SGP_DIRTY) set_page_dirty(page); - - } else { - spin_unlock(&info->lock); - error = -ENOMEM; - goto out; } done: - *pagep = page; - error = 0; -out: - if (prealloc_page) { - mem_cgroup_uncharge_cache_page(prealloc_page); - page_cache_release(prealloc_page); + /* Perhaps the file has been truncated since we checked */ + if (sgp != SGP_WRITE && + ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { + error = -EINVAL; + goto trunc; } - return error; + *pagep = page; + return 0; -nospace: /* - * Perhaps the page was brought in from swap between find_lock_page - * and taking info->lock? We allow for that at add_to_page_cache_lru, - * but must also avoid reporting a spurious ENOSPC while working on a - * full tmpfs. + * Error recovery. 
*/ - page = find_get_page(mapping, index); +trunc: + ClearPageDirty(page); + delete_from_page_cache(page); + spin_lock(&info->lock); + info->alloced--; + inode->i_blocks -= BLOCKS_PER_PAGE; spin_unlock(&info->lock); +decused: + if (sbinfo->max_blocks) + percpu_counter_add(&sbinfo->used_blocks, -1); +unacct: + shmem_unacct_blocks(info->flags, 1); +failed: + if (swap.val && error != -EINVAL) { + struct page *test = find_get_page(mapping, index); + if (test && !radix_tree_exceptional_entry(test)) + page_cache_release(test); + /* Have another try if the entry has changed */ + if (test != swp_to_radix_entry(swap)) + error = -EEXIST; + } if (page) { + unlock_page(page); page_cache_release(page); + } + if (error == -ENOSPC && !once++) { + info = SHMEM_I(inode); + spin_lock(&info->lock); + shmem_recalc_inode(inode); + spin_unlock(&info->lock); goto repeat; } - error = -ENOSPC; - goto out; + if (error == -EEXIST) + goto repeat; + return error; } static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -1095,9 +1064,6 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int error; int ret = VM_FAULT_LOCKED; - if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) - return VM_FAULT_SIGBUS; - error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); if (error) return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); @@ -2164,8 +2130,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) if (config.max_inodes < inodes) goto out; /* - * Those tests also disallow limited->unlimited while any are in - * use, so i_blocks will always be zero when max_blocks is zero; + * Those tests disallow limited->unlimited while any are in use; * but we must separately disallow unlimited->limited, because * in that case we have no record of how much is already in use. */ -- cgit v1.2.3 From aa3b189551ad8e5cc1d9c663735c131650238278 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:24 -0700 Subject: tmpfs: convert mem_cgroup shmem to radix-swap Remove mem_cgroup_shmem_charge_fallback(): it was only required when we had to move swappage to filecache with GFP_NOWAIT. Remove the GFP_NOWAIT special case from mem_cgroup_cache_charge(), by moving its call out from shmem_add_to_page_cache() to two of thats three callers. But leave it doing mem_cgroup_uncharge_cache_page() on error: although asymmetrical, it's easier for all 3 callers to handle. These two changes would also be appropriate if anyone were to start using shmem_read_mapping_page_gfp() with GFP_NOWAIT. Remove mem_cgroup_get_shmem_target(): mc_handle_file_pte() can test radix_tree_exceptional_entry() to get what it needs for itself. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 66 +++++++-------------------------------------- mm/shmem.c | 83 ++++++++------------------------------------------------- 2 files changed, 20 insertions(+), 129 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5f84d2351ddb..f4ec4e7ca4cd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include @@ -2873,30 +2872,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, return 0; if (PageCompound(page)) return 0; - /* - * Corner case handling. This is called from add_to_page_cache() - * in usual. 
But some FS (shmem) precharges this page before calling it - * and call add_to_page_cache() with GFP_NOWAIT. - * - * For GFP_NOWAIT case, the page may be pre-charged before calling - * add_to_page_cache(). (See shmem.c) check it here and avoid to call - * charge twice. (It works but has to pay a bit larger cost.) - * And when the page is SwapCache, it should take swap information - * into account. This is under lock_page() now. - */ - if (!(gfp_mask & __GFP_WAIT)) { - struct page_cgroup *pc; - - pc = lookup_page_cgroup(page); - if (!pc) - return 0; - lock_page_cgroup(pc); - if (PageCgroupUsed(pc)) { - unlock_page_cgroup(pc); - return 0; - } - unlock_page_cgroup(pc); - } if (unlikely(!mm)) mm = &init_mm; @@ -3486,31 +3461,6 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, cgroup_release_and_wakeup_rmdir(&mem->css); } -/* - * A call to try to shrink memory usage on charge failure at shmem's swapin. - * Calling hierarchical_reclaim is not enough because we should update - * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM. - * Moreover considering hierarchy, we should reclaim from the mem_over_limit, - * not from the memcg which this page would be charged to. - * try_charge_swapin does all of these works properly. - */ -int mem_cgroup_shmem_charge_fallback(struct page *page, - struct mm_struct *mm, - gfp_t gfp_mask) -{ - struct mem_cgroup *mem; - int ret; - - if (mem_cgroup_disabled()) - return 0; - - ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); - if (!ret) - mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */ - - return ret; -} - #ifdef CONFIG_DEBUG_VM static struct page_cgroup *lookup_page_cgroup_used(struct page *page) { @@ -5330,15 +5280,17 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma, pgoff = pte_to_pgoff(ptent); /* page is moved even if it's not RSS of this task(page-faulted). */ - if (!mapping_cap_swap_backed(mapping)) { /* normal file */ - page = find_get_page(mapping, pgoff); - } else { /* shmem/tmpfs file. we should take account of swap too. */ - swp_entry_t ent; - mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent); + page = find_get_page(mapping, pgoff); + +#ifdef CONFIG_SWAP + /* shmem/tmpfs may report page out on swap: account for that too. */ + if (radix_tree_exceptional_entry(page)) { + swp_entry_t swap = radix_to_swp_entry(page); if (do_swap_account) - entry->val = ent.val; + *entry = swap; + page = find_get_page(&swapper_space, swap.val); } - +#endif return page; } diff --git a/mm/shmem.c b/mm/shmem.c index 92f01d7cc150..13ef2d7e912d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -262,15 +262,11 @@ static int shmem_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp, void *expected) { - int error; + int error = 0; VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageSwapBacked(page)); - error = mem_cgroup_cache_charge(page, current->mm, - gfp & GFP_RECLAIM_MASK); - if (error) - goto out; if (!expected) error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); if (!error) { @@ -300,7 +296,6 @@ static int shmem_add_to_page_cache(struct page *page, } if (error) mem_cgroup_uncharge_cache_page(page); -out: return error; } @@ -660,7 +655,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page) * Charge page using GFP_KERNEL while we can wait, before taking * the shmem_swaplist_mutex which might hold up shmem_writepage(). * Charged back to the user (not to caller) when swap account is used. - * shmem_add_to_page_cache() will be called with GFP_NOWAIT. 
*/ error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); if (error) @@ -954,8 +948,11 @@ repeat: goto failed; } - error = shmem_add_to_page_cache(page, mapping, index, - gfp, swp_to_radix_entry(swap)); + error = mem_cgroup_cache_charge(page, current->mm, + gfp & GFP_RECLAIM_MASK); + if (!error) + error = shmem_add_to_page_cache(page, mapping, index, + gfp, swp_to_radix_entry(swap)); if (error) goto failed; @@ -990,8 +987,11 @@ repeat: SetPageSwapBacked(page); __set_page_locked(page); - error = shmem_add_to_page_cache(page, mapping, index, - gfp, NULL); + error = mem_cgroup_cache_charge(page, current->mm, + gfp & GFP_RECLAIM_MASK); + if (!error) + error = shmem_add_to_page_cache(page, mapping, index, + gfp, NULL); if (error) goto decused; lru_cache_add_anon(page); @@ -2442,42 +2442,6 @@ out4: return error; } -#ifdef CONFIG_CGROUP_MEM_RES_CTLR -/** - * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file - * @inode: the inode to be searched - * @index: the page offset to be searched - * @pagep: the pointer for the found page to be stored - * @swapp: the pointer for the found swap entry to be stored - * - * If a page is found, refcount of it is incremented. Callers should handle - * these refcount. - */ -void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, - struct page **pagep, swp_entry_t *swapp) -{ - struct shmem_inode_info *info = SHMEM_I(inode); - struct page *page = NULL; - swp_entry_t swap = {0}; - - if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) - goto out; - - spin_lock(&info->lock); -#ifdef CONFIG_SWAP - swap = shmem_get_swap(info, index); - if (swap.val) - page = find_get_page(&swapper_space, swap.val); - else -#endif - page = find_get_page(inode->i_mapping, index); - spin_unlock(&info->lock); -out: - *pagep = page; - *swapp = swap; -} -#endif - #else /* !CONFIG_SHMEM */ /* @@ -2523,31 +2487,6 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) } EXPORT_SYMBOL_GPL(shmem_truncate_range); -#ifdef CONFIG_CGROUP_MEM_RES_CTLR -/** - * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file - * @inode: the inode to be searched - * @index: the page offset to be searched - * @pagep: the pointer for the found page to be stored - * @swapp: the pointer for the found swap entry to be stored - * - * If a page is found, refcount of it is incremented. Callers should handle - * these refcount. - */ -void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, - struct page **pagep, swp_entry_t *swapp) -{ - struct page *page = NULL; - - if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) - goto out; - page = find_get_page(inode->i_mapping, index); -out: - *pagep = page; - *swapp = (swp_entry_t){0}; -} -#endif - #define shmem_vm_ops generic_file_vm_ops #define shmem_file_operations ramfs_file_operations #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) -- cgit v1.2.3 From 6922c0c7abd387374255801f7739624867e8acad Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:25 -0700 Subject: tmpfs: convert shmem_writepage and enable swap Convert shmem_writepage() to use shmem_delete_from_page_cache() to use shmem_radix_tree_replace() to substitute swap entry for page pointer atomically in the radix tree. As with shmem_add_to_page_cache(), it's not entirely satisfactory to be copying such code from delete_from_swap_cache, but again judged easier to sell than making its other callers go through the extras. 
Remove the toy implementation's shmem_put_swap() and shmem_get_swap(), now unreferenced, and the hack to disable swap: it's now good to go. The way things have worked out, info->lock no longer helps to guard the shmem_swaplist: we increment swapped under shmem_swaplist_mutex only. That global mutex exclusion between shmem_writepage() and shmem_unuse() is not pretty, and we ought to find another way; but it's been forced on us by recent race discoveries, not a consequence of this patchset. And what has become of the WARN_ON_ONCE(1) free_swap_and_cache() if a swap entry was found already present? That's no longer possible, the (unknown) one inserting this page into filecache would hit the swap entry occupying that slot. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 88 ++++++++++++++++++++++++++------------------------------------ 1 file changed, 37 insertions(+), 51 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 13ef2d7e912d..0f094a258526 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -6,7 +6,8 @@ * 2000-2001 Christoph Rohland * 2000-2001 SAP AG * 2002 Red Hat Inc. - * Copyright (C) 2002-2005 Hugh Dickins. + * Copyright (C) 2002-2011 Hugh Dickins. + * Copyright (C) 2011 Google Inc. * Copyright (C) 2002-2005 VERITAS Software Corporation. * Copyright (C) 2004 Andi Kleen, SuSE Labs * @@ -219,19 +220,6 @@ static void shmem_recalc_inode(struct inode *inode) } } -static void shmem_put_swap(struct shmem_inode_info *info, pgoff_t index, - swp_entry_t swap) -{ - if (index < SHMEM_NR_DIRECT) - info->i_direct[index] = swap; -} - -static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index) -{ - return (index < SHMEM_NR_DIRECT) ? - info->i_direct[index] : (swp_entry_t){0}; -} - /* * Replace item expected in radix tree by a new item, while holding tree lock. */ @@ -299,6 +287,25 @@ static int shmem_add_to_page_cache(struct page *page, return error; } +/* + * Like delete_from_page_cache, but substitutes swap for page. + */ +static void shmem_delete_from_page_cache(struct page *page, void *radswap) +{ + struct address_space *mapping = page->mapping; + int error; + + spin_lock_irq(&mapping->tree_lock); + error = shmem_radix_tree_replace(mapping, page->index, page, radswap); + page->mapping = NULL; + mapping->nrpages--; + __dec_zone_page_state(page, NR_FILE_PAGES); + __dec_zone_page_state(page, NR_SHMEM); + spin_unlock_irq(&mapping->tree_lock); + page_cache_release(page); + BUG_ON(error); +} + /* * Like find_get_pages, but collecting swap entries as well as pages. 
*/ @@ -664,14 +671,10 @@ int shmem_unuse(swp_entry_t swap, struct page *page) mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(this, next, &shmem_swaplist) { info = list_entry(this, struct shmem_inode_info, swaplist); - if (!info->swapped) { - spin_lock(&info->lock); - if (!info->swapped) - list_del_init(&info->swaplist); - spin_unlock(&info->lock); - } if (info->swapped) found = shmem_unuse_inode(info, swap, page); + else + list_del_init(&info->swaplist); cond_resched(); if (found) break; @@ -694,10 +697,10 @@ out: static int shmem_writepage(struct page *page, struct writeback_control *wbc) { struct shmem_inode_info *info; - swp_entry_t swap, oswap; struct address_space *mapping; - pgoff_t index; struct inode *inode; + swp_entry_t swap; + pgoff_t index; BUG_ON(!PageLocked(page)); mapping = page->mapping; @@ -720,55 +723,38 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ goto redirty; } - - /* - * Disable even the toy swapping implementation, while we convert - * functions one by one to having swap entries in the radix tree. - */ - if (index < ULONG_MAX) - goto redirty; - swap = get_swap_page(); if (!swap.val) goto redirty; /* * Add inode to shmem_unuse()'s list of swapped-out inodes, - * if it's not already there. Do it now because we cannot take - * mutex while holding spinlock, and must do so before the page - * is moved to swap cache, when its pagelock no longer protects + * if it's not already there. Do it now before the page is + * moved to swap cache, when its pagelock no longer protects * the inode from eviction. But don't unlock the mutex until - * we've taken the spinlock, because shmem_unuse_inode() will - * prune a !swapped inode from the swaplist under both locks. + * we've incremented swapped, because shmem_unuse_inode() will + * prune a !swapped inode from the swaplist under this mutex. */ mutex_lock(&shmem_swaplist_mutex); if (list_empty(&info->swaplist)) list_add_tail(&info->swaplist, &shmem_swaplist); - spin_lock(&info->lock); - mutex_unlock(&shmem_swaplist_mutex); - - oswap = shmem_get_swap(info, index); - if (oswap.val) { - WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ - free_swap_and_cache(oswap); - shmem_put_swap(info, index, (swp_entry_t){0}); - info->swapped--; - } - shmem_recalc_inode(inode); - if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { - delete_from_page_cache(page); - shmem_put_swap(info, index, swap); - info->swapped++; swap_shmem_alloc(swap); + shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); + + spin_lock(&info->lock); + info->swapped++; + shmem_recalc_inode(inode); spin_unlock(&info->lock); + + mutex_unlock(&shmem_swaplist_mutex); BUG_ON(page_mapped(page)); swap_writepage(page, wbc); return 0; } - spin_unlock(&info->lock); + mutex_unlock(&shmem_swaplist_mutex); swapcache_free(swap, NULL); redirty: set_page_dirty(page); -- cgit v1.2.3 From 69f07ec938712b58755add82dd3d0b35f01317cc Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:26 -0700 Subject: tmpfs: use kmemdup for short symlinks But we've not yet removed the old swp_entry_t i_direct[16] from shmem_inode_info. That's because it was still being shared with the inline symlink. Remove it now (saving 64 or 128 bytes from shmem inode size), and use kmemdup() for short symlinks, say, those up to 128 bytes. I wonder why mpol_free_shared_policy() is done in shmem_destroy_inode() rather than shmem_evict_inode(), where we usually do such freeing? 
I guess it doesn't matter, and I'm not into NUMA mpol testing right now. Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Reviewed-by: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 0f094a258526..3a5be0feb6af 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -73,6 +73,9 @@ static struct vfsmount *shm_mnt; /* Pretend that each entry is of this size in directory's i_size */ #define BOGO_DIRENT_SIZE 20 +/* Symlink up to this size is kmalloc'ed instead of using a swappable page */ +#define SHORT_SYMLINK_LEN 128 + struct shmem_xattr { struct list_head list; /* anchored by shmem_inode_info->xattr_list */ char *name; /* xattr name */ @@ -585,7 +588,8 @@ static void shmem_evict_inode(struct inode *inode) list_del_init(&info->swaplist); mutex_unlock(&shmem_swaplist_mutex); } - } + } else + kfree(info->symlink); list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) { kfree(xattr->name); @@ -1173,7 +1177,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode #ifdef CONFIG_TMPFS static const struct inode_operations shmem_symlink_inode_operations; -static const struct inode_operations shmem_symlink_inline_operations; +static const struct inode_operations shmem_short_symlink_operations; static int shmem_write_begin(struct file *file, struct address_space *mapping, @@ -1638,10 +1642,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s info = SHMEM_I(inode); inode->i_size = len-1; - if (len <= SHMEM_SYMLINK_INLINE_LEN) { - /* do it inline */ - memcpy(info->inline_symlink, symname, len); - inode->i_op = &shmem_symlink_inline_operations; + if (len <= SHORT_SYMLINK_LEN) { + info->symlink = kmemdup(symname, len, GFP_KERNEL); + if (!info->symlink) { + iput(inode); + return -ENOMEM; + } + inode->i_op = &shmem_short_symlink_operations; } else { error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); if (error) { @@ -1664,9 +1671,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s return 0; } -static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) +static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) { - nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink); + nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); return NULL; } @@ -1914,9 +1921,9 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) } #endif /* CONFIG_TMPFS_XATTR */ -static const struct inode_operations shmem_symlink_inline_operations = { +static const struct inode_operations shmem_short_symlink_operations = { .readlink = generic_readlink, - .follow_link = shmem_follow_link_inline, + .follow_link = shmem_follow_short_symlink, #ifdef CONFIG_TMPFS_XATTR .setxattr = shmem_setxattr, .getxattr = shmem_getxattr, @@ -2259,10 +2266,8 @@ static void shmem_destroy_callback(struct rcu_head *head) static void shmem_destroy_inode(struct inode *inode) { - if ((inode->i_mode & S_IFMT) == S_IFREG) { - /* only struct inode is valid if it's an inline symlink */ + if ((inode->i_mode & S_IFMT) == S_IFREG) mpol_free_shared_policy(&SHMEM_I(inode)->policy); - } call_rcu(&inode->i_rcu, shmem_destroy_callback); } -- cgit v1.2.3 From 31475dd611209413bace21651a400afb91d0bd9d Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:27 -0700 Subject: mm: a few small updates for 
radix-swap Remove PageSwapBacked (!page_is_file_cache) cases from add_to_page_cache_locked() and add_to_page_cache_lru(): those pages now go through shmem_add_to_page_cache(). Remove a comment on maximum tmpfs size from fsstack_copy_inode_size(), and add a comment on swap entries to invalidate_mapping_pages(). And mincore_page() uses find_get_page() on what might be shmem or a tmpfs file: allow for a radix_tree_exceptional_entry(), and proceed to find_get_page() on swapper_space if so (oh, swapper_space needs #ifdef). Signed-off-by: Hugh Dickins Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 21 +++------------------ mm/mincore.c | 10 ++++++---- mm/truncate.c | 8 ++++++++ 3 files changed, 17 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 76bfb6460f57..96778faf82d5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -33,7 +33,6 @@ #include #include /* for BUG_ON(!in_atomic()) only */ #include -#include /* for page_is_file_cache() */ #include #include "internal.h" @@ -462,6 +461,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, int error; VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(PageSwapBacked(page)); error = mem_cgroup_cache_charge(page, current->mm, gfp_mask & GFP_RECLAIM_MASK); @@ -479,8 +479,6 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, if (likely(!error)) { mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); - if (PageSwapBacked(page)) - __inc_zone_page_state(page, NR_SHMEM); spin_unlock_irq(&mapping->tree_lock); } else { page->mapping = NULL; @@ -502,22 +500,9 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, { int ret; - /* - * Splice_read and readahead add shmem/tmpfs pages into the page cache - * before shmem_readpage has a chance to mark them as SwapBacked: they - * need to go on the anon lru below, and mem_cgroup_cache_charge - * (called in add_to_page_cache) needs to know where they're going too. - */ - if (mapping_cap_swap_backed(mapping)) - SetPageSwapBacked(page); - ret = add_to_page_cache(page, mapping, offset, gfp_mask); - if (ret == 0) { - if (page_is_file_cache(page)) - lru_cache_add_file(page); - else - lru_cache_add_anon(page); - } + if (ret == 0) + lru_cache_add_file(page); return ret; } EXPORT_SYMBOL_GPL(add_to_page_cache_lru); diff --git a/mm/mincore.c b/mm/mincore.c index a4e6b9d75c76..733f1829b0dc 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -69,12 +69,14 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) * file will not get a swp_entry_t in its pte, but rather it is like * any other file mapping (ie. marked !present and faulted in with * tmpfs's .fault). So swapped out tmpfs mappings are tested here. - * - * However when tmpfs moves the page from pagecache and into swapcache, - * it is still in core, but the find_get_page below won't find it. - * No big deal, but make a note of it. 
*/ page = find_get_page(mapping, pgoff); +#ifdef CONFIG_SWAP + if (radix_tree_exceptional_entry(page)) { + swp_entry_t swap = radix_to_swp_entry(page); + page = find_get_page(&swapper_space, swap.val); + } +#endif if (page) { present = PageUptodate(page); page_cache_release(page); diff --git a/mm/truncate.c b/mm/truncate.c index 232eb2736a79..b40ac6d4e86e 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -336,6 +336,14 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, unsigned long count = 0; int i; + /* + * Note: this function may get called on a shmem/tmpfs mapping: + * pagevec_lookup() might then return 0 prematurely (because it + * got a gangful of swap entries); but it's hardly worth worrying + * about - it can rarely have anything to free from such a mapping + * (most pages are dirty), and already skips over any difficulties. + */ + pagevec_init(&pvec, 0); while (index <= end && pagevec_lookup(&pvec, mapping, index, min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { -- cgit v1.2.3 From e504f3fdd63d486d45b18009e5a65f2e329acb0a Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:27 -0700 Subject: tmpfs radix_tree: locate_item to speed up swapoff We have already acknowledged that swapoff of a tmpfs file is slower than it was before conversion to the generic radix_tree: a little slower there will be acceptable, if the hotter paths are faster. But it was a shock to find swapoff of a 500MB file 20 times slower on my laptop, taking 10 minutes; and at that rate it significantly slows down my testing. Now, most of that turned out to be overhead from PROVE_LOCKING and PROVE_RCU: without those it was only 4 times slower than before; and more realistic tests on other machines don't fare as badly. I've tried a number of things to improve it, including tagging the swap entries, then doing lookup by tag: I'd expected that to halve the time, but in practice it's erratic, and often counter-productive. The only change I've so far found to make a consistent improvement, is to short-circuit the way we go back and forth, gang lookup packing entries into the array supplied, then shmem scanning that array for the target entry. Scanning in place doubles the speed, so it's now only twice as slow as before (or three times slower when the PROVEs are on). So, add radix_tree_locate_item() as an expedient, once-off, single-caller hack to do the lookup directly in place. #ifdef it on CONFIG_SHMEM and CONFIG_SWAP, as much to document its limited applicability as save space in other configurations. And, sadly, #include sched.h for cond_resched(). Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 38 +------------------------------------- 1 file changed, 1 insertion(+), 37 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 3a5be0feb6af..1c702f6f1241 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -356,42 +356,6 @@ export: return ret; } -/* - * Lockless lookup of swap entry in radix tree, avoiding refcount on pages. 
- */ -static pgoff_t shmem_find_swap(struct address_space *mapping, void *radswap) -{ - void **slots[PAGEVEC_SIZE]; - pgoff_t indices[PAGEVEC_SIZE]; - unsigned int nr_found; - -restart: - nr_found = 1; - indices[0] = -1; - while (nr_found) { - pgoff_t index = indices[nr_found - 1] + 1; - unsigned int i; - - rcu_read_lock(); - nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, - slots, indices, index, PAGEVEC_SIZE); - for (i = 0; i < nr_found; i++) { - void *item = radix_tree_deref_slot(slots[i]); - if (radix_tree_deref_retry(item)) { - rcu_read_unlock(); - goto restart; - } - if (item == radswap) { - rcu_read_unlock(); - return indices[i]; - } - } - rcu_read_unlock(); - cond_resched(); - } - return -1; -} - /* * Remove swap entry from radix tree, free the swap and its page cache. */ @@ -612,7 +576,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, int error; radswap = swp_to_radix_entry(swap); - index = shmem_find_swap(mapping, radswap); + index = radix_tree_locate_item(&mapping->page_tree, radswap); if (index == -1) return 0; -- cgit v1.2.3 From 8079b1c859c44f27d63da4951f5038a16589a563 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 3 Aug 2011 16:21:28 -0700 Subject: mm: clarify the radix_tree exceptional cases Make the radix_tree exceptional cases, mostly in filemap.c, clearer. It's hard to devise a suitable snappy name that illuminates the use by shmem/tmpfs for swap, while keeping filemap/pagecache/radix_tree generality. And akpm points out that /* radix_tree_deref_retry(page) */ comments look like calls that have been commented out for unknown reason. Skirt the naming difficulty by rearranging these blocks to handle the transient radix_tree_deref_retry(page) case first; then just explain the remaining shmem/tmpfs swap case in a comment. Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 66 ++++++++++++++++++++++++++++++++++++++++-------------------- mm/mincore.c | 1 + mm/shmem.c | 12 +++++++---- 3 files changed, 53 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index 96778faf82d5..645a080ba4df 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -700,10 +700,14 @@ repeat: if (unlikely(!page)) goto out; if (radix_tree_exception(page)) { - if (radix_tree_exceptional_entry(page)) - goto out; - /* radix_tree_deref_retry(page) */ - goto repeat; + if (radix_tree_deref_retry(page)) + goto repeat; + /* + * Otherwise, shmem/tmpfs must be storing a swap entry + * here as an exceptional entry: so return it without + * attempting to raise page count. + */ + goto out; } if (!page_cache_get_speculative(page)) goto repeat; @@ -838,15 +842,21 @@ repeat: continue; if (radix_tree_exception(page)) { - if (radix_tree_exceptional_entry(page)) - continue; + if (radix_tree_deref_retry(page)) { + /* + * Transient condition which can only trigger + * when entry at index 0 moves out of or back + * to root: none yet gotten, safe to restart. + */ + WARN_ON(start | i); + goto restart; + } /* - * radix_tree_deref_retry(page): - * can only trigger when entry at index 0 moves out of - * or back to root: none yet gotten, safe to restart. + * Otherwise, shmem/tmpfs must be storing a swap entry + * here as an exceptional entry: so skip over it - + * we only reach this from invalidate_mapping_pages(). 
*/ - WARN_ON(start | i); - goto restart; + continue; } if (!page_cache_get_speculative(page)) @@ -904,14 +914,20 @@ repeat: continue; if (radix_tree_exception(page)) { - if (radix_tree_exceptional_entry(page)) - break; + if (radix_tree_deref_retry(page)) { + /* + * Transient condition which can only trigger + * when entry at index 0 moves out of or back + * to root: none yet gotten, safe to restart. + */ + goto restart; + } /* - * radix_tree_deref_retry(page): - * can only trigger when entry at index 0 moves out of - * or back to root: none yet gotten, safe to restart. + * Otherwise, shmem/tmpfs must be storing a swap entry + * here as an exceptional entry: so stop looking for + * contiguous pages. */ - goto restart; + break; } if (!page_cache_get_speculative(page)) @@ -973,13 +989,19 @@ repeat: continue; if (radix_tree_exception(page)) { - BUG_ON(radix_tree_exceptional_entry(page)); + if (radix_tree_deref_retry(page)) { + /* + * Transient condition which can only trigger + * when entry at index 0 moves out of or back + * to root: none yet gotten, safe to restart. + */ + goto restart; + } /* - * radix_tree_deref_retry(page): - * can only trigger when entry at index 0 moves out of - * or back to root: none yet gotten, safe to restart. + * This function is never used on a shmem/tmpfs + * mapping, so a swap entry won't be found here. */ - goto restart; + BUG(); } if (!page_cache_get_speculative(page)) diff --git a/mm/mincore.c b/mm/mincore.c index 733f1829b0dc..636a86876ff2 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -72,6 +72,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) */ page = find_get_page(mapping, pgoff); #ifdef CONFIG_SWAP + /* shmem/tmpfs may return swap: account for swapcache page too. */ if (radix_tree_exceptional_entry(page)) { swp_entry_t swap = radix_to_swp_entry(page); page = find_get_page(&swapper_space, swap.val); diff --git a/mm/shmem.c b/mm/shmem.c index 1c702f6f1241..32f6763f16fb 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -332,10 +332,14 @@ repeat: if (unlikely(!page)) continue; if (radix_tree_exception(page)) { - if (radix_tree_exceptional_entry(page)) - goto export; - /* radix_tree_deref_retry(page) */ - goto restart; + if (radix_tree_deref_retry(page)) + goto restart; + /* + * Otherwise, we must be storing a swap entry + * here as an exceptional entry: so return it + * without attempting to raise page count. + */ + goto export; } if (!page_cache_get_speculative(page)) goto repeat; -- cgit v1.2.3 From 83835b3d9aec8e9f666d8223d8a386814f756266 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jul 2011 15:26:05 +0200 Subject: slab, lockdep: Annotate slab -> rcu -> debug_object -> slab Lockdep thinks there's lock recursion through: kmem_cache_free() cache_flusharray() spin_lock(&l3->list_lock) <----------------. free_block() | slab_destroy() | call_rcu() | debug_object_activate() | debug_object_init() | __debug_object_init() | kmem_cache_alloc() | cache_alloc_refill() | spin_lock(&l3->list_lock) --' Now debug objects doesn't use SLAB_DESTROY_BY_RCU and hence there is no actual possibility of recursing. Luckily debug objects marks its slab cache with SLAB_DEBUG_OBJECTS so we can identify the thing. Mark all SLAB_DEBUG_OBJECTS (all one!) slab caches with a special lockdep key so that lockdep sees it's a different cachep. Also add a WARN on trying to create a SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS cache, to avoid possible future trouble.
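The core trick in the patch below is lockdep_set_class(): locks sharing one struct lock_class_key are treated by lockdep as a single class, so giving the debug-objects cache's locks a private key keeps the nested acquisition from being folded into the generic slab lock class. A minimal kernel-style sketch of that idea, under the assumption of a toy cache type (the toy_* names are illustrative; only the lockdep API is real):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* one key per "kind" of cache: lockdep treats locks sharing a key as one class */
static struct lock_class_key toy_debugobj_list_lock_key;

struct toy_cache_node {
	spinlock_t list_lock;
};

static void toy_mark_debugobj_cache(struct toy_cache_node *n)
{
	spin_lock_init(&n->list_lock);
	/*
	 * Same lock type, different class: lockdep now tracks this cache's
	 * list_lock separately from every other cache's list_lock, so the
	 * free_block() -> call_rcu() -> kmem_cache_alloc() chain is no
	 * longer reported as self-recursion.
	 */
	lockdep_set_class(&n->list_lock, &toy_debugobj_list_lock_key);
}
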
Reported-and-tested-by: Sebastian Siewior [ fixes to the initial patch ] Reported-by: Thomas Gleixner Acked-by: Pekka Enberg Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1311341165.27400.58.camel@twins Signed-off-by: Ingo Molnar --- mm/slab.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 68 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 95947400702b..0703578fb4d7 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -622,6 +622,51 @@ int slab_is_available(void) static struct lock_class_key on_slab_l3_key; static struct lock_class_key on_slab_alc_key; +static struct lock_class_key debugobj_l3_key; +static struct lock_class_key debugobj_alc_key; + +static void slab_set_lock_classes(struct kmem_cache *cachep, + struct lock_class_key *l3_key, struct lock_class_key *alc_key, + int q) +{ + struct array_cache **alc; + struct kmem_list3 *l3; + int r; + + l3 = cachep->nodelists[q]; + if (!l3) + return; + + lockdep_set_class(&l3->list_lock, l3_key); + alc = l3->alien; + /* + * FIXME: This check for BAD_ALIEN_MAGIC + * should go away when common slab code is taught to + * work even without alien caches. + * Currently, non NUMA code returns BAD_ALIEN_MAGIC + * for alloc_alien_cache, + */ + if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) + return; + for_each_node(r) { + if (alc[r]) + lockdep_set_class(&alc[r]->lock, alc_key); + } +} + +static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) +{ + slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node); +} + +static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) +{ + int node; + + for_each_online_node(node) + slab_set_debugobj_lock_classes_node(cachep, node); +} + static void init_node_lock_keys(int q) { struct cache_sizes *s = malloc_sizes; @@ -630,29 +675,14 @@ static void init_node_lock_keys(int q) return; for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { - struct array_cache **alc; struct kmem_list3 *l3; - int r; l3 = s->cs_cachep->nodelists[q]; if (!l3 || OFF_SLAB(s->cs_cachep)) continue; - lockdep_set_class(&l3->list_lock, &on_slab_l3_key); - alc = l3->alien; - /* - * FIXME: This check for BAD_ALIEN_MAGIC - * should go away when common slab code is taught to - * work even without alien caches. - * Currently, non NUMA code returns BAD_ALIEN_MAGIC - * for alloc_alien_cache, - */ - if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) - continue; - for_each_node(r) { - if (alc[r]) - lockdep_set_class(&alc[r]->lock, - &on_slab_alc_key); - } + + slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key, + &on_slab_alc_key, q); } } @@ -671,6 +701,14 @@ static void init_node_lock_keys(int q) static inline void init_lock_keys(void) { } + +static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) +{ +} + +static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) +{ +} #endif /* @@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu) spin_unlock_irq(&l3->list_lock); kfree(shared); free_alien_cache(alien); + if (cachep->flags & SLAB_DEBUG_OBJECTS) + slab_set_debugobj_lock_classes_node(cachep, node); } init_node_lock_keys(node); @@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align, goto oops; } + if (flags & SLAB_DEBUG_OBJECTS) { + /* + * Would deadlock through slab_destroy()->call_rcu()-> + * debug_object_activate()->kmem_cache_alloc(). 
+ */ + WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU); + + slab_set_debugobj_lock_classes(cachep); + } + /* cache setup completed, link it into the list */ list_add(&cachep->next, &cache_chain); oops: -- cgit v1.2.3 From 30765b92ada267c5395fc788623cb15233276f5c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Jul 2011 23:22:56 +0200 Subject: slab, lockdep: Annotate the locks before using them Fernando found we hit the regular OFF_SLAB 'recursion' before we annotate the locks, cure this. The relevant portion of the stack-trace: > [ 0.000000] [] rt_spin_lock+0x50/0x56 > [ 0.000000] [] __cache_free+0x43/0xc3 > [ 0.000000] [] kmem_cache_free+0x6c/0xdc > [ 0.000000] [] slab_destroy+0x4f/0x53 > [ 0.000000] [] free_block+0x94/0xc1 > [ 0.000000] [] do_tune_cpucache+0x10b/0x2bb > [ 0.000000] [] enable_cpucache+0x7b/0xa7 > [ 0.000000] [] kmem_cache_init_late+0x1f/0x61 > [ 0.000000] [] start_kernel+0x24c/0x363 > [ 0.000000] [] i386_start_kernel+0xa9/0xaf Reported-by: Fernando Lopez-Lezcano Acked-by: Pekka Enberg Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1311888176.2617.379.camel@laptop Signed-off-by: Ingo Molnar --- mm/slab.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 0703578fb4d7..6d90a091fdca 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1666,6 +1666,9 @@ void __init kmem_cache_init_late(void) { struct kmem_cache *cachep; + /* Annotate slab for lockdep -- annotate the malloc caches */ + init_lock_keys(); + /* 6) resize the head arrays to their final sizes */ mutex_lock(&cache_chain_mutex); list_for_each_entry(cachep, &cache_chain, next) @@ -1676,9 +1679,6 @@ void __init kmem_cache_init_late(void) /* Done! */ g_cpucache_up = FULL; - /* Annotate slab for lockdep -- annotate the malloc caches */ - init_lock_keys(); - /* * Register a cpu startup notifier callback that initializes * cpu_cache_get for all new cpus -- cgit v1.2.3 From 6fbabb20faed9c08f8b96de4182bd721cbd1cfcf Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 8 Aug 2011 11:16:56 -0500 Subject: slub: Fix full list corruption if debugging is on When a slab is freed by __slab_free() and the slab can only contain a single object ever then it was full (and therefore not on the partial lists but on the full list in the debug case) before we reached slab_empty. This caused the following full list corruption when SLUB debugging was enabled: [ 5913.233035] ------------[ cut here ]------------ [ 5913.233097] WARNING: at lib/list_debug.c:53 __list_del_entry+0x8d/0x98() [ 5913.233101] Hardware name: Adamo 13 [ 5913.233105] list_del corruption. 
prev->next should be ffffea000434fd20, but was ffffea0004199520 [ 5913.233108] Modules linked in: nfs fscache fuse ebtable_nat ebtables ppdev parport_pc lp parport ipt_MASQUERADE iptable_nat nf_nat nfsd lockd nfs_acl auth_rpcgss xt_CHECKSUM sunrpc iptable_mangle bridge stp llc cpufreq_ondemand acpi_cpufreq freq_table mperf ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables rfcomm bnep arc4 iwlagn snd_hda_codec_hdmi snd_hda_codec_idt snd_hda_intel btusb mac80211 snd_hda_codec bluetooth snd_hwdep snd_seq snd_seq_device snd_pcm usb_debug dell_wmi sparse_keymap cdc_ether usbnet cdc_acm uvcvideo cdc_wdm mii cfg80211 snd_timer dell_laptop videodev dcdbas snd microcode v4l2_compat_ioctl32 soundcore joydev tg3 pcspkr snd_page_alloc iTCO_wdt i2c_i801 rfkill iTCO_vendor_support wmi virtio_net kvm_intel kvm ipv6 xts gf128mul dm_crypt i915 drm_kms_helper drm i2c_algo_bit i2c_core video [last unloaded: scsi_wait_scan] [ 5913.233213] Pid: 0, comm: swapper Not tainted 3.0.0+ #127 [ 5913.233213] Call Trace: [ 5913.233213] [] warn_slowpath_common+0x83/0x9b [ 5913.233213] [] warn_slowpath_fmt+0x46/0x48 [ 5913.233213] [] __list_del_entry+0x8d/0x98 [ 5913.233213] [] list_del+0xe/0x2d [ 5913.233213] [] __slab_free+0x1db/0x235 [ 5913.233213] [] ? bvec_free_bs+0x35/0x37 [ 5913.233213] [] ? bvec_free_bs+0x35/0x37 [ 5913.233213] [] ? bvec_free_bs+0x35/0x37 [ 5913.233213] [] kmem_cache_free+0x88/0x102 [ 5913.233213] [] bvec_free_bs+0x35/0x37 [ 5913.233213] [] bio_free+0x34/0x64 [ 5913.233213] [] dm_bio_destructor+0x12/0x14 [ 5913.233213] [] bio_put+0x2b/0x2d [ 5913.233213] [] clone_endio+0x9e/0xb4 [ 5913.233213] [] bio_endio+0x2d/0x2f [ 5913.233213] [] crypt_dec_pending+0x5c/0x8b [dm_crypt] [ 5913.233213] [] crypt_endio+0x78/0x81 [dm_crypt] [ Full discussion here: https://lkml.org/lkml/2011/8/4/375 ] Make sure that we remove such a slab also from the full lists. Reported-and-tested-by: Dave Jones Reported-and-tested-by: Xiaotian Feng Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index eb5a8f93338a..5436fe2fbf9c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page, slab_empty: if (prior) { /* - * Slab still on the partial list. + * Slab on the partial list. */ remove_partial(n, page); stat(s, FREE_REMOVE_PARTIAL); - } + } else + /* Slab must be on the full list */ + remove_full(s, page); spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); -- cgit v1.2.3 From ef62fb32b7b21731e41aea3c1e08bcdb407c9eb9 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 7 Aug 2011 18:30:38 +0900 Subject: slub: fix check_bytes() for slub debugging The check_bytes() function is used by slub debugging. It returns a pointer to the first non-matching byte for a character in the given memory area. If the byte value to be matched is 0x80 or greater, check_bytes() doesn't work, because the 64-bit pattern is generated as below. value64 = value | value << 8 | value << 16 | value << 24; value64 = value64 | value64 << 32; The integer promotions are performed and the result is sign-extended, as the type of value is u8. The upper 32 bits of value64 are 0xffffffff after the first line, and the second line has no effect. This fixes the 64-bit pattern generation.
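The sign-extension pitfall is easy to reproduce outside the kernel. The following stand-alone demo (hypothetical user-space code, not part of the patch) prints the pattern produced by the old and the fixed expressions for a byte value of 0x90:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t value = 0x90;		/* any byte value of 0x80 or above triggers the bug */
	uint64_t value64;

	/* old code: the ORs are evaluated as (signed) int, so the assignment
	 * to a 64-bit variable sign-extends and fills the upper 32 bits */
	value64 = value | value << 8 | value << 16 | value << 24;
	value64 = value64 | value64 << 32;
	printf("buggy: 0x%016llx\n", (unsigned long long)value64);	/* 0xffffffff90909090 */

	/* fixed code: mask off the sign-extended upper half before
	 * replicating the 32-bit pattern into the top 32 bits */
	value64 = value | value << 8 | value << 16 | value << 24;
	value64 = (value64 & 0xffffffff) | value64 << 32;
	printf("fixed: 0x%016llx\n", (unsigned long long)value64);	/* 0x9090909090909090 */

	return 0;
}
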
Signed-off-by: Akinobu Mita Cc: Christoph Lameter Cc: Matt Mackall Reviewed-by: Marcin Slusarz Acked-by: Eric Dumazet Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 5436fe2fbf9c..6da68597bde8 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes) return check_bytes8(start, value, bytes); value64 = value | value << 8 | value << 16 | value << 24; - value64 = value64 | value64 << 32; + value64 = (value64 & 0xffffffff) | value64 << 32; prefix = 8 - ((unsigned long)start) % 8; if (prefix) { -- cgit v1.2.3 From 81107188f123e3c2217ac2f2feb2a1147904c62f Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 9 Aug 2011 13:01:32 -0500 Subject: slub: Fix partial count comparison confusion deactivate_slab() gets the comparison wrong when checking whether more than the minimum number of partial pages are on the partial list. An effect of this may be that empty pages are not freed from deactivate_slab(). The result could be an OOM due to growth of the partial slabs per node. Frees mostly occur from __slab_free(), which is okay, so this would only affect use cases where a lot of switching around of per cpu slabs occurs. Switching per cpu slabs occurs with high frequency if debugging options are enabled. Reported-and-tested-by: Xiaotian Feng Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 6da68597bde8..9f662d70eb47 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1854,7 +1854,7 @@ redo: new.frozen = 0; - if (!new.inuse && n->nr_partial < s->min_partial) + if (!new.inuse && n->nr_partial > s->min_partial) m = M_FREE; else if (new.freelist) { m = M_PARTIAL; -- cgit v1.2.3 From 130655ef097940b627e8e04fa7c6f3b51cf24f85 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Tue, 23 Aug 2011 08:36:59 +0800 Subject: slub: add slab with one free object to partial list tail If the slab has just one free object, adding it to the partial list head doesn't make sense, and it can cause lock contention. For example, 1. CPU takes the slab from partial list 2. fetch an object 3. switch to another slab 4. free an object, then the slab is added to partial list again In this way n->list_lock will be heavily contended. In fact, Alex had a hackbench regression. 3.1-rc1 performance drops about 70% against 3.0. This patch fixes it. Acked-by: Christoph Lameter Reported-by: Alex Shi Signed-off-by: Shaohua Li Signed-off-by: Shaohua Li Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 9f662d70eb47..7c54fe83a90c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, */ if (unlikely(!prior)) { remove_full(s, page); - add_partial(n, page, 0); + add_partial(n, page, 1); stat(s, FREE_ADD_PARTIAL); } } -- cgit v1.2.3 From 136333d104bd3a62d783b0ac3d0f32ac0108c5d0 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 24 Aug 2011 08:57:52 +0800 Subject: slub: explicitly document position of inserting slab to partial list Adding a slab to the partial list head or tail is performance sensitive. So explicitly use DEACTIVATE_TO_TAIL/DEACTIVATE_TO_HEAD to document it and avoid getting it wrong.
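Head vs. tail placement, and why a named constant beats a bare 0/1 at the add_partial() call sites, can be shown with a small user-space mock-up; the list type and values below are simplified assumptions, only the constant names mirror the patch:

#include <stdio.h>

enum { DEACTIVATE_TO_HEAD, DEACTIVATE_TO_TAIL };	/* mirrors the bare 0/1 it replaces */

struct node { int id; struct node *next; };

static struct node *partial;	/* stand-in for a kmem_cache_node's partial list */

static void add_partial(struct node *page, int tail)
{
	if (tail == DEACTIVATE_TO_TAIL) {
		/* nearly-full slabs go to the tail: they are picked last,
		 * so they get a chance to drain and be freed */
		struct node **pp = &partial;
		while (*pp)
			pp = &(*pp)->next;
		page->next = NULL;
		*pp = page;
	} else {
		/* slabs with plenty of free objects go to the head: they
		 * are reused first, keeping allocation cheap */
		page->next = partial;
		partial = page;
	}
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

	add_partial(&a, DEACTIVATE_TO_HEAD);
	add_partial(&b, DEACTIVATE_TO_TAIL);	/* intent is obvious at the call site */
	add_partial(&c, DEACTIVATE_TO_HEAD);

	for (struct node *n = partial; n; n = n->next)
		printf("%d ", n->id);		/* prints: 3 1 2 */
	printf("\n");
	return 0;
}
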
Acked-by: Christoph Lameter Signed-off-by: Shaohua Li Signed-off-by: Shaohua Li Signed-off-by: Pekka Enberg --- mm/slub.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 7c54fe83a90c..91a120f185d1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n, struct page *page, int tail) { n->nr_partial++; - if (tail) + if (tail == DEACTIVATE_TO_TAIL) list_add_tail(&page->lru, &n->partial); else list_add(&page->lru, &n->partial); @@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) enum slab_modes l = M_NONE, m = M_NONE; void *freelist; void *nextfree; - int tail = 0; + int tail = DEACTIVATE_TO_HEAD; struct page new; struct page old; if (page->freelist) { stat(s, DEACTIVATE_REMOTE_FREES); - tail = 1; + tail = DEACTIVATE_TO_TAIL; } c->tid = next_tid(c->tid); @@ -1893,7 +1893,7 @@ redo: if (m == M_PARTIAL) { add_partial(n, page, tail); - stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); + stat(s, tail); } else if (m == M_FULL) { @@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, */ if (unlikely(!prior)) { remove_full(s, page); - add_partial(n, page, 1); + add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } } @@ -2695,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node) init_kmem_cache_node(n, kmem_cache_node); inc_slabs_node(kmem_cache_node, node, page->objects); - add_partial(n, page, 0); + add_partial(n, page, DEACTIVATE_TO_HEAD); } static void free_kmem_cache_nodes(struct kmem_cache *s) -- cgit v1.2.3
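For context on the radix-swap patches earlier in this series: shmem stores swap entries in the page-cache radix tree as "exceptional" entries, i.e. tagged non-pointer values that lookups such as mincore_page() can tell apart from struct page pointers. The stand-alone sketch below mimics that encoding with simplified stand-ins for the kernel's swp_to_radix_entry()/radix_to_swp_entry() helpers; the exact bit layout here is an assumption for illustration, not the kernel's:

#include <stdio.h>

#define EXCEPTIONAL_BIT		0x2UL	/* struct page pointers are aligned, so low bits are free */
#define EXCEPTIONAL_SHIFT	2

/* encode a swap entry as a tagged, non-pointer radix tree item */
static void *swp_to_radix_entry(unsigned long swp_val)
{
	return (void *)((swp_val << EXCEPTIONAL_SHIFT) | EXCEPTIONAL_BIT);
}

/* true if the radix tree item is a tagged value rather than a page pointer */
static int radix_tree_exceptional_entry(void *entry)
{
	return ((unsigned long)entry & EXCEPTIONAL_BIT) != 0;
}

/* recover the swap entry from a tagged item */
static unsigned long radix_to_swp_entry(void *entry)
{
	return (unsigned long)entry >> EXCEPTIONAL_SHIFT;
}

int main(void)
{
	void *entry = swp_to_radix_entry(0x1234);

	if (radix_tree_exceptional_entry(entry))
		printf("swap entry 0x%lx stored as radix item %p\n",
		       radix_to_swp_entry(entry), entry);
	return 0;
}
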