@@ -278,9 +278,10 @@ int lru_add_drain_all(void)
  * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
  * for the remainder of the operation.
  *
- * The locking in this function is against shrink_cache(): we recheck the
- * page count inside the lock to see whether shrink_cache grabbed the page
- * via the LRU.  If it did, give up: shrink_cache will free it.
+ * The locking in this function is against shrink_inactive_list(): we recheck
+ * the page count inside the lock to see whether shrink_inactive_list()
+ * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
+ * will free it.
  */
 void release_pages(struct page **pages, int nr, int cold)
 {