author	Nick Piggin <npiggin@suse.de>	2008-08-02 12:01:03 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-04 21:31:34 -0700
commit	529ae9aaa08378cfe2a4350bded76f32cc8ff0ce (patch)
tree	d3ae998f9876c72a83a022805103a92111852b21 /include
parent	e9ba9698187ddbc0c5bfcf41de0349a662d23d02 (diff)
mm: rename page trylock
Converting page lock to new locking bitops requires a change of page flag operation naming, so we might as well convert it to something nicer (!TestSetPageLocked_Lock => trylock_page, SetPageLocked => set_page_locked).

This also facilitates lockdeping of page lock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
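The subtle point for callers is that trylock_page() has the opposite return sense of the old TestSetPageLocked(): a nonzero result now means the lock was acquired, which is why the converted call sites below read "if (!trylock_page(page))". A minimal userspace model of the three new helpers, using GCC atomic builtins in place of the kernel's bitops (struct fake_page and the model_* names are illustrative stand-ins, not kernel API):

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct page; only the flags word matters here. */
	struct fake_page {
		unsigned long flags;
	};

	#define PG_LOCKED_BIT	0	/* models PG_locked */

	/* Models set_page_locked(): unconditionally set the lock bit. */
	static void model_set_page_locked(struct fake_page *page)
	{
		__atomic_fetch_or(&page->flags, 1UL << PG_LOCKED_BIT, __ATOMIC_ACQUIRE);
	}

	/* Models clear_page_locked(): unconditionally clear the lock bit. */
	static void model_clear_page_locked(struct fake_page *page)
	{
		__atomic_fetch_and(&page->flags, ~(1UL << PG_LOCKED_BIT), __ATOMIC_RELEASE);
	}

	/*
	 * Models trylock_page(): returns nonzero if the bit was previously clear,
	 * i.e. the caller now owns the lock -- the inverse of TestSetPageLocked().
	 */
	static int model_trylock_page(struct fake_page *page)
	{
		unsigned long old = __atomic_fetch_or(&page->flags,
						      1UL << PG_LOCKED_BIT,
						      __ATOMIC_ACQUIRE);
		return !(old & (1UL << PG_LOCKED_BIT));
	}

	int main(void)
	{
		struct fake_page page = { .flags = 0 };

		assert(model_trylock_page(&page));	/* first try succeeds */
		assert(!model_trylock_page(&page));	/* second try fails: already locked */
		model_clear_page_locked(&page);
		assert(model_trylock_page(&page));	/* succeeds again after unlock */

		model_clear_page_locked(&page);
		model_set_page_locked(&page);		/* new-page path: set unconditionally */
		assert(!model_trylock_page(&page));

		printf("trylock model behaves as expected\n");
		return 0;
	}

In the kernel itself the same behaviour comes from set_bit(), clear_bit() and test_and_set_bit() on page->flags, as the new inlines in pagemap.h show.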
Diffstat (limited to 'include')
-rw-r--r--  include/linux/page-flags.h   2
-rw-r--r--  include/linux/pagemap.h     67
2 files changed, 42 insertions, 27 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 25aaccdb2f26..c74d3e875314 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \
struct page; /* forward declaration */
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked)
PAGEFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 69ed3cb1197a..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
return read_cache_page(mapping, index, filler, data);
}
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
- struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
- int error;
-
- SetPageLocked(page);
- error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
- if (unlikely(error))
- ClearPageLocked(page);
- return error;
-}
-
/*
* Return byte-offset into filesystem object for page.
*/
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);
+static inline void set_page_locked(struct page *page)
+{
+ set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+ clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+ return !test_and_set_bit(PG_locked, &page->flags);
+}
+
/*
* lock_page may only be called if we have the page's inode pinned.
*/
static inline void lock_page(struct page *page)
{
might_sleep();
- if (TestSetPageLocked(page))
+ if (!trylock_page(page))
__lock_page(page);
}
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page)
static inline int lock_page_killable(struct page *page)
{
might_sleep();
- if (TestSetPageLocked(page))
+ if (!trylock_page(page))
return __lock_page_killable(page);
return 0;
}
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
static inline void lock_page_nosync(struct page *page)
{
might_sleep();
- if (TestSetPageLocked(page))
+ if (!trylock_page(page))
__lock_page_nosync(page);
}
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
return ret;
}
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+ struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+ int error;
+
+ set_page_locked(page);
+ error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+ if (unlikely(error))
+ clear_page_locked(page);
+ return error;
+}
+
#endif /* _LINUX_PAGEMAP_H */
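The relocated add_to_page_cache() keeps its old shape: lock the newly allocated page up front, attempt the insertion, and roll the lock bit back only on failure. A short runnable sketch of that shape, with a hypothetical fake_insert_into_cache() standing in for add_to_page_cache_locked() (all names here are illustrative, not kernel API):

	#include <errno.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct page: only a lock flag is modeled. */
	struct fake_page {
		unsigned long flags;
	};

	#define FAKE_PG_LOCKED	(1UL << 0)

	/* Hypothetical insert step; the real helper can fail with e.g. -ENOMEM. */
	static int fake_insert_into_cache(struct fake_page *page, int should_fail)
	{
		(void)page;
		return should_fail ? -ENOMEM : 0;
	}

	/*
	 * Models the shape of add_to_page_cache(): the page is new and not yet
	 * visible to anyone else, so the lock bit is simply set up front and
	 * cleared again only if the insertion fails.
	 */
	static int model_add_to_page_cache(struct fake_page *page, int should_fail)
	{
		int error;

		page->flags |= FAKE_PG_LOCKED;		/* set_page_locked() */
		error = fake_insert_into_cache(page, should_fail);
		if (error)
			page->flags &= ~FAKE_PG_LOCKED;	/* clear_page_locked() on failure */
		return error;
	}

	int main(void)
	{
		struct fake_page page = { .flags = 0 };

		if (model_add_to_page_cache(&page, 0) == 0 && (page.flags & FAKE_PG_LOCKED))
			printf("success: page left locked for the caller\n");

		page.flags = 0;
		if (model_add_to_page_cache(&page, 1) != 0 && !(page.flags & FAKE_PG_LOCKED))
			printf("failure: lock bit rolled back\n");

		return 0;
	}

Moving the helper below the locking primitives in pagemap.h is what lets it call set_page_locked() and clear_page_locked() directly instead of the removed SetPageLocked()/ClearPageLocked() page-flag macros.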