@@ -286,15 +286,6 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */

 /*
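Aside, not part of the patch: the removed comment describes the old scheme, where a free page kept ->_count at -1 so the 0 -> -1 transition could be caught with atomic_add_negative(); after this change ->_count holds the real reference count and zero means free. A minimal userspace sketch of the two conventions, with C11 atomics standing in for the kernel's atomic_t and "demo_page" plus the helpers being invented names:

/* Illustration only: not kernel code. */
#include <stdio.h>
#include <stdatomic.h>

struct demo_page { atomic_int _count; };

/* New convention: _count holds the real reference count, 0 means free. */
static int demo_page_count_new(struct demo_page *p)
{
	return atomic_load(&p->_count);
}

/* Old convention from the removed comment: a free page stored -1, so the
 * externally visible count had to be read back with a +1 offset. */
static int demo_page_count_old(struct demo_page *p)
{
	return atomic_load(&p->_count) + 1;
}

int main(void)
{
	struct demo_page free_new = { 0 };	/* free page under the new rules */
	struct demo_page free_old = { -1 };	/* free page under the old rules */

	/* Both report zero references; only the new layout lets callers
	 * compare _count against 0 directly, without the bias. */
	printf("new: %d  old: %d\n", demo_page_count_new(&free_new),
	       demo_page_count_old(&free_old));
	return 0;
}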
@@ -303,8 +294,8 @@ struct page {
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == -1);
-	return atomic_add_negative(-1, &page->_count);
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
 }

 /*
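Aside, not part of the patch: with the bias gone, put_page_testzero() becomes a plain "drop one reference, report whether it was the last", which is exactly atomic_dec_and_test(). A small userspace model of that primitive (again C11 atomics in place of atomic_t; the names are invented):

#include <assert.h>
#include <stdatomic.h>

struct demo_page { atomic_int _count; };

/* Model of the new put_page_testzero(): drop a ref, return true
 * when the count reaches zero (the caller may then free the page). */
static int demo_put_page_testzero(struct demo_page *p)
{
	assert(atomic_load(&p->_count) != 0);	/* mirrors the new BUG_ON() */
	return atomic_fetch_sub(&p->_count, 1) == 1;
}

int main(void)
{
	struct demo_page page = { 2 };		/* two references held */
	int last;

	last = demo_put_page_testzero(&page);	/* 2 -> 1 */
	assert(!last);				/* not the last reference */
	last = demo_put_page_testzero(&page);	/* 1 -> 0 */
	assert(last);				/* last reference dropped */
	return 0;
}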
@@ -313,10 +304,10 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
-	return atomic_add_unless(&page->_count, 1, -1);
+	return atomic_inc_not_zero(&page->_count);
 }

-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
+#define set_page_count(p,v)	atomic_set(&(p)->_count, (v))
 #define __put_page(p)		atomic_dec(&(p)->_count)

 extern void FASTCALL(__page_cache_release(struct page *));
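Aside, not part of the patch: get_page_unless_zero() is the speculative-reference primitive; it takes a reference only while the count is still non-zero, which atomic_inc_not_zero() expresses directly once a free page really has _count == 0. A sketch of the same idea as a C11 compare-and-swap loop (a userspace model, not the kernel implementation, with invented names):

#include <assert.h>
#include <stdatomic.h>

struct demo_page { atomic_int _count; };

/* Model of atomic_inc_not_zero(): bump the count only if it is
 * currently non-zero, i.e. refuse to resurrect a free page. */
static int demo_get_page_unless_zero(struct demo_page *p)
{
	int old = atomic_load(&p->_count);

	while (old != 0) {
		/* on failure the CAS refreshes 'old' and we retry */
		if (atomic_compare_exchange_weak(&p->_count, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;			/* page already free, leave it alone */
}

int main(void)
{
	struct demo_page live = { 1 }, freed = { 0 };
	int got;

	got = demo_get_page_unless_zero(&live);		/* 1 -> 2 */
	assert(got);
	got = demo_get_page_unless_zero(&freed);	/* stays at 0 */
	assert(!got);
	return 0;
}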
@@ -325,7 +316,7 @@ static inline int page_count(struct page *page)
 {
 	if (PageCompound(page))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }

 static inline void get_page(struct page *page)
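One last aside on the unchanged context above, not part of the patch: page_count() may be handed a constituent page of a compound (higher-order) page, in which case the diff's context shows it redirecting through page_private() to the page that actually carries the count; that indirection is orthogonal to the -1/0 change. A toy model of it (the layout and names below are invented for the illustration and simplify what the kernel stores in page_private()):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct demo_page {
	atomic_int _count;
	bool compound;			/* models PageCompound() */
	struct demo_page *head;		/* models page_private() redirection */
};

/* Model of page_count(): compound pages defer to the page their
 * private field points at, where the real count is kept. */
static int demo_page_count(struct demo_page *p)
{
	if (p->compound)
		p = p->head;
	return atomic_load(&p->_count);
}

int main(void)
{
	struct demo_page head = { 3, true, &head };	/* head points at itself */
	struct demo_page tail = { 0, true, &head };	/* tail points at the head */

	assert(demo_page_count(&tail) == 3);	/* count comes from the head */
	assert(demo_page_count(&head) == 3);
	return 0;
}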