Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     | 242
-rw-r--r--  mm/hugetlb.c     |   5
-rw-r--r--  mm/memcontrol.c  |   1
-rw-r--r--  mm/mmap.c        |   3
-rw-r--r--  mm/nommu.c       |   3
-rw-r--r--  mm/page_cgroup.c |  33
-rw-r--r--  mm/shmem.c       |   8
-rw-r--r--  mm/slab.c        |  52
-rw-r--r--  mm/slub.c        |  29
-rw-r--r--  mm/vmalloc.c     |  36
-rw-r--r--  mm/vmstat.c      |  69
11 files changed, 207 insertions, 274 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index ab8553658af3..f3e5f8944d17 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2029,48 +2029,8 @@ int pagecache_write_begin(struct file *file, struct address_space *mapping,
{
const struct address_space_operations *aops = mapping->a_ops;
- if (aops->write_begin) {
- return aops->write_begin(file, mapping, pos, len, flags,
+ return aops->write_begin(file, mapping, pos, len, flags,
pagep, fsdata);
- } else {
- int ret;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
- struct inode *inode = mapping->host;
- struct page *page;
-again:
- page = __grab_cache_page(mapping, index);
- *pagep = page;
- if (!page)
- return -ENOMEM;
-
- if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
- /*
- * There is no way to resolve a short write situation
- * for a !Uptodate page (except by double copying in
- * the caller done by generic_perform_write_2copy).
- *
- * Instead, we have to bring it uptodate here.
- */
- ret = aops->readpage(file, page);
- page_cache_release(page);
- if (ret) {
- if (ret == AOP_TRUNCATED_PAGE)
- goto again;
- return ret;
- }
- goto again;
- }
-
- ret = aops->prepare_write(file, page, offset, offset+len);
- if (ret) {
- unlock_page(page);
- page_cache_release(page);
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
- }
- return ret;
- }
}
EXPORT_SYMBOL(pagecache_write_begin);
@@ -2079,32 +2039,9 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
const struct address_space_operations *aops = mapping->a_ops;
- int ret;
-
- if (aops->write_end) {
- mark_page_accessed(page);
- ret = aops->write_end(file, mapping, pos, len, copied,
- page, fsdata);
- } else {
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
- struct inode *inode = mapping->host;
-
- flush_dcache_page(page);
- ret = aops->commit_write(file, page, offset, offset+len);
- unlock_page(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- if (ret < 0) {
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
- } else if (ret > 0)
- ret = min_t(size_t, copied, ret);
- else
- ret = copied;
- }
- return ret;
+ mark_page_accessed(page);
+ return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
@@ -2226,174 +2163,6 @@ repeat:
}
EXPORT_SYMBOL(__grab_cache_page);
-static ssize_t generic_perform_write_2copy(struct file *file,
- struct iov_iter *i, loff_t pos)
-{
- struct address_space *mapping = file->f_mapping;
- const struct address_space_operations *a_ops = mapping->a_ops;
- struct inode *inode = mapping->host;
- long status = 0;
- ssize_t written = 0;
-
- do {
- struct page *src_page;
- struct page *page;
- pgoff_t index; /* Pagecache index for current page */
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
- size_t copied; /* Bytes copied from user */
-
- offset = (pos & (PAGE_CACHE_SIZE - 1));
- index = pos >> PAGE_CACHE_SHIFT;
- bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
- iov_iter_count(i));
-
- /*
- * a non-NULL src_page indicates that we're doing the
- * copy via get_user_pages and kmap.
- */
- src_page = NULL;
-
- /*
- * Bring in the user page that we will copy from _first_.
- * Otherwise there's a nasty deadlock on copying from the
- * same page as we're writing to, without it being marked
- * up-to-date.
- *
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
- */
- if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
- status = -EFAULT;
- break;
- }
-
- page = __grab_cache_page(mapping, index);
- if (!page) {
- status = -ENOMEM;
- break;
- }
-
- /*
- * non-uptodate pages cannot cope with short copies, and we
- * cannot take a pagefault with the destination page locked.
- * So pin the source page to copy it.
- */
- if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
- unlock_page(page);
-
- src_page = alloc_page(GFP_KERNEL);
- if (!src_page) {
- page_cache_release(page);
- status = -ENOMEM;
- break;
- }
-
- /*
- * Cannot get_user_pages with a page locked for the
- * same reason as we can't take a page fault with a
- * page locked (as explained below).
- */
- copied = iov_iter_copy_from_user(src_page, i,
- offset, bytes);
- if (unlikely(copied == 0)) {
- status = -EFAULT;
- page_cache_release(page);
- page_cache_release(src_page);
- break;
- }
- bytes = copied;
-
- lock_page(page);
- /*
- * Can't handle the page going uptodate here, because
- * that means we would use non-atomic usercopies, which
- * zero out the tail of the page, which can cause
- * zeroes to become transiently visible. We could just
- * use a non-zeroing copy, but the APIs aren't too
- * consistent.
- */
- if (unlikely(!page->mapping || PageUptodate(page))) {
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(src_page);
- continue;
- }
- }
-
- status = a_ops->prepare_write(file, page, offset, offset+bytes);
- if (unlikely(status))
- goto fs_write_aop_error;
-
- if (!src_page) {
- /*
- * Must not enter the pagefault handler here, because
- * we hold the page lock, so we might recursively
- * deadlock on the same lock, or get an ABBA deadlock
- * against a different lock, or against the mmap_sem
- * (which nests outside the page lock). So increment
- * preempt count, and use _atomic usercopies.
- *
- * The page is uptodate so we are OK to encounter a
- * short copy: if unmodified parts of the page are
- * marked dirty and written out to disk, it doesn't
- * really matter.
- */
- pagefault_disable();
- copied = iov_iter_copy_from_user_atomic(page, i,
- offset, bytes);
- pagefault_enable();
- } else {
- void *src, *dst;
- src = kmap_atomic(src_page, KM_USER0);
- dst = kmap_atomic(page, KM_USER1);
- memcpy(dst + offset, src + offset, bytes);
- kunmap_atomic(dst, KM_USER1);
- kunmap_atomic(src, KM_USER0);
- copied = bytes;
- }
- flush_dcache_page(page);
-
- status = a_ops->commit_write(file, page, offset, offset+bytes);
- if (unlikely(status < 0))
- goto fs_write_aop_error;
- if (unlikely(status > 0)) /* filesystem did partial write */
- copied = min_t(size_t, copied, status);
-
- unlock_page(page);
- mark_page_accessed(page);
- page_cache_release(page);
- if (src_page)
- page_cache_release(src_page);
-
- iov_iter_advance(i, copied);
- pos += copied;
- written += copied;
-
- balance_dirty_pages_ratelimited(mapping);
- cond_resched();
- continue;
-
-fs_write_aop_error:
- unlock_page(page);
- page_cache_release(page);
- if (src_page)
- page_cache_release(src_page);
-
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- */
- if (pos + bytes > inode->i_size)
- vmtruncate(inode, inode->i_size);
- break;
- } while (iov_iter_count(i));
-
- return written ? written : status;
-}
-
static ssize_t generic_perform_write(struct file *file,
struct iov_iter *i, loff_t pos)
{
@@ -2494,10 +2263,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
struct iov_iter i;
iov_iter_init(&i, iov, nr_segs, count, written);
- if (a_ops->write_begin)
- status = generic_perform_write(file, &i, pos);
- else
- status = generic_perform_write_2copy(file, &i, pos);
+ status = generic_perform_write(file, &i, pos);
if (likely(status >= 0)) {
written += status;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ce8cbb29860b..421aee99b84a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
@@ -1455,10 +1456,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
#endif /* CONFIG_SYSCTL */
-int hugetlb_report_meminfo(char *buf)
+void hugetlb_report_meminfo(struct seq_file *m)
{
struct hstate *h = &default_hstate;
- return sprintf(buf,
+ seq_printf(m,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"HugePages_Rsvd: %5lu\n"
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d4a92b63e98e..866dcc7eeb0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1088,7 +1088,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
int node;
if (unlikely((cont->parent) == NULL)) {
- page_cgroup_init();
mem = &init_mem_cgroup;
} else {
mem = mem_cgroup_alloc();
diff --git a/mm/mmap.c b/mm/mmap.c
index 74f4d158022e..de14ac21e5b5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -175,7 +175,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
/* Don't let a single process grow too big:
leave 3% of the size of this process for other processes */
- allowed -= mm->total_vm / 32;
+ if (mm)
+ allowed -= mm->total_vm / 32;
/*
* cast `allowed' as a signed long because vm_committed_space
diff --git a/mm/nommu.c b/mm/nommu.c
index 2696b24f2bb3..7695dc850785 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1454,7 +1454,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
/* Don't let a single process grow too big:
leave 3% of the size of this process for other processes */
- allowed -= current->mm->total_vm / 32;
+ if (mm)
+ allowed -= mm->total_vm / 32;
/*
* cast `allowed' as a signed long because vm_committed_space
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 5d86550701f2..f59d797dc5a9 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -4,7 +4,10 @@
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
+#include <linux/slab.h>
#include <linux/memory.h>
+#include <linux/vmalloc.h>
+#include <linux/cgroup.h>
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -66,6 +69,9 @@ void __init page_cgroup_init(void)
int nid, fail;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
for_each_online_node(nid) {
fail = alloc_node_page_cgroup(nid);
if (fail)
@@ -106,9 +112,14 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
- base = kmalloc_node(table_size, GFP_KERNEL, nid);
- if (!base)
- base = vmalloc_node(table_size, nid);
+ if (slab_is_available()) {
+ base = kmalloc_node(table_size, GFP_KERNEL, nid);
+ if (!base)
+ base = vmalloc_node(table_size, nid);
+ } else {
+ base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+ PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ }
if (!base) {
printk(KERN_ERR "page cgroup allocation failure\n");
@@ -135,11 +146,16 @@ void __free_page_cgroup(unsigned long pfn)
if (!ms || !ms->page_cgroup)
return;
base = ms->page_cgroup + pfn;
- ms->page_cgroup = NULL;
- if (is_vmalloc_addr(base))
+ if (is_vmalloc_addr(base)) {
vfree(base);
- else
- kfree(base);
+ ms->page_cgroup = NULL;
+ } else {
+ struct page *page = virt_to_page(base);
+ if (!PageReserved(page)) { /* Is bootmem ? */
+ kfree(base);
+ ms->page_cgroup = NULL;
+ }
+ }
}
int online_page_cgroup(unsigned long start_pfn,
@@ -213,6 +229,9 @@ void __init page_cgroup_init(void)
unsigned long pfn;
int fail = 0;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
if (!pfn_present(pfn))
continue;
diff --git a/mm/shmem.c b/mm/shmem.c
index d38d7e61fcd0..0ed075215e5f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -161,8 +161,8 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
- return (flags & VM_ACCOUNT)?
- security_vm_enough_memory(VM_ACCT(size)): 0;
+ return (flags & VM_ACCOUNT) ?
+ security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
@@ -179,8 +179,8 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
*/
static inline int shmem_acct_block(unsigned long flags)
{
- return (flags & VM_ACCOUNT)?
- 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
+ return (flags & VM_ACCOUNT) ?
+ 0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
diff --git a/mm/slab.c b/mm/slab.c
index e76eee466886..09187517f9dc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -95,6 +95,7 @@
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
@@ -4258,7 +4259,7 @@ static int s_show(struct seq_file *m, void *p)
* + further values on SMP and with statistics enabled
*/
-const struct seq_operations slabinfo_op = {
+static const struct seq_operations slabinfo_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
@@ -4315,6 +4316,19 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
return res;
}
+static int slabinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &slabinfo_op);
+}
+
+static const struct file_operations proc_slabinfo_operations = {
+ .open = slabinfo_open,
+ .read = seq_read,
+ .write = slabinfo_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
#ifdef CONFIG_DEBUG_SLAB_LEAK
static void *leaks_start(struct seq_file *m, loff_t *pos)
@@ -4443,13 +4457,47 @@ static int leaks_show(struct seq_file *m, void *p)
return 0;
}
-const struct seq_operations slabstats_op = {
+static const struct seq_operations slabstats_op = {
.start = leaks_start,
.next = s_next,
.stop = s_stop,
.show = leaks_show,
};
+
+static int slabstats_open(struct inode *inode, struct file *file)
+{
+ unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ int ret = -ENOMEM;
+ if (n) {
+ ret = seq_open(file, &slabstats_op);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ *n = PAGE_SIZE / (2 * sizeof(unsigned long));
+ m->private = n;
+ n = NULL;
+ }
+ kfree(n);
+ }
+ return ret;
+}
+
+static const struct file_operations proc_slabstats_operations = {
+ .open = slabstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+#endif
+
+static int __init slab_proc_init(void)
+{
+ proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+ proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
+ return 0;
+}
+module_init(slab_proc_init);
#endif
/**
diff --git a/mm/slub.c b/mm/slub.c
index 0c83e6afe7b2..7ad489af9561 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
@@ -4417,14 +4418,6 @@ __initcall(slab_sysfs_init);
* The /proc/slabinfo ABI
*/
#ifdef CONFIG_SLABINFO
-
-ssize_t slabinfo_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- return -EINVAL;
-}
-
-
static void print_slabinfo_header(struct seq_file *m)
{
seq_puts(m, "slabinfo - version: 2.1\n");
@@ -4492,11 +4485,29 @@ static int s_show(struct seq_file *m, void *p)
return 0;
}
-const struct seq_operations slabinfo_op = {
+static const struct seq_operations slabinfo_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
+static int slabinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &slabinfo_op);
+}
+
+static const struct file_operations proc_slabinfo_operations = {
+ .open = slabinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init slab_proc_init(void)
+{
+ proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+ return 0;
+}
+module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 65ae576030da..f1cc03bbf6ac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
@@ -896,7 +897,8 @@ EXPORT_SYMBOL(vm_unmap_ram);
* @count: number of pages
* @node: prefer to allocate data structures on this node
* @prot: memory protection to use. PAGE_KERNEL for regular RAM
- * @returns: a pointer to the address that has been mapped, or NULL on failure
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
@@ -1718,11 +1720,41 @@ static int s_show(struct seq_file *m, void *p)
return 0;
}
-const struct seq_operations vmalloc_op = {
+static const struct seq_operations vmalloc_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
+
+static int vmalloc_open(struct inode *inode, struct file *file)
+{
+ unsigned int *ptr = NULL;
+ int ret;
+
+ if (NUMA_BUILD)
+ ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
+ ret = seq_open(file, &vmalloc_op);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = ptr;
+ } else
+ kfree(ptr);
+ return ret;
+}
+
+static const struct file_operations proc_vmalloc_operations = {
+ .open = vmalloc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static int __init proc_vmalloc_init(void)
+{
+ proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
+ return 0;
+}
+module_init(proc_vmalloc_init);
#endif
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9343227c5c60..c3ccfda23adc 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -8,7 +8,7 @@
* Copyright (C) 2006 Silicon Graphics, Inc.,
* Christoph Lameter <christoph@lameter.com>
*/
-
+#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -384,7 +384,7 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z)
#endif
#ifdef CONFIG_PROC_FS
-
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static char * const migratetype_names[MIGRATE_TYPES] = {
@@ -581,20 +581,44 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
return 0;
}
-const struct seq_operations fragmentation_op = {
+static const struct seq_operations fragmentation_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = frag_show,
};
-const struct seq_operations pagetypeinfo_op = {
+static int fragmentation_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fragmentation_op);
+}
+
+static const struct file_operations fragmentation_file_operations = {
+ .open = fragmentation_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct seq_operations pagetypeinfo_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = pagetypeinfo_show,
};
+static int pagetypeinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &pagetypeinfo_op);
+}
+
+static const struct file_operations pagetypeinfo_file_ops = {
+ .open = pagetypeinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
@@ -771,7 +795,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
return 0;
}
-const struct seq_operations zoneinfo_op = {
+static const struct seq_operations zoneinfo_op = {
.start = frag_start, /* iterate over all zones. The same as in
* fragmentation. */
.next = frag_next,
@@ -779,6 +803,18 @@ const struct seq_operations zoneinfo_op = {
.show = zoneinfo_show,
};
+static int zoneinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &zoneinfo_op);
+}
+
+static const struct file_operations proc_zoneinfo_file_operations = {
+ .open = zoneinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
unsigned long *v;
@@ -834,13 +870,24 @@ static void vmstat_stop(struct seq_file *m, void *arg)
m->private = NULL;
}
-const struct seq_operations vmstat_op = {
+static const struct seq_operations vmstat_op = {
.start = vmstat_start,
.next = vmstat_next,
.stop = vmstat_stop,
.show = vmstat_show,
};
+static int vmstat_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &vmstat_op);
+}
+
+static const struct file_operations proc_vmstat_file_operations = {
+ .open = vmstat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
@@ -898,9 +945,11 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
static struct notifier_block __cpuinitdata vmstat_notifier =
{ &vmstat_cpuup_callback, NULL, 0 };
+#endif
static int __init setup_vmstat(void)
{
+#ifdef CONFIG_SMP
int cpu;
refresh_zone_stat_thresholds();
@@ -908,7 +957,13 @@ static int __init setup_vmstat(void)
for_each_online_cpu(cpu)
start_cpu_timer(cpu);
+#endif
+#ifdef CONFIG_PROC_FS
+ proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+ proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+ proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
+#endif
return 0;
}
module_init(setup_vmstat)
-#endif