/*
 *  linux/arch/arm26/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *  Copyright (C) 2003  Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   26/01/1996	RMK	Cleaned up various areas to make it a little more generic
 *   07/02/1999	RMK	Support added for 16K and 32K page sizes
 *			containing 8K blocks
 *   23/05/2004	IM	Fixed to use struct page->lru (thanks wli)
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/bitops.h>

#include <asm/pgtable.h>

/* PEDANTIC enables the BUG_ON() sanity checks on the in-use bitmap. */
#define PEDANTIC
/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * FIXME - this comment is *out of date*
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The Linux memory
 *  management system will then ignore the "offset", "next_hash" and
 *  "pprev_hash" entries in the mem_map for this page.
 *
 *  We then use a bitstring in the "offset" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose using
 *  the "next_hash" and "pprev_hash" entries of mem_map;
 */
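
/*
 * Illustration (added for clarity; the numbers follow from the orders[]
 * table below, not from any external source): with 32K pages carved into
 * 2K blocks, each page holds 16 blocks, so USED_MAP() is a 16-bit bitmap.
 * A page with blocks 0, 1 and 5 allocated has USED_MAP() == 0x0023;
 * ffz(0x0023) == 2, so the next allocation from that page returns block 2,
 * i.e. the address 2 << 11 bytes above the page base.
 */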
struct order {
	struct list_head queue;
	unsigned int mask;		/* (1 << shift) - 1		*/
	unsigned int shift;		/* (1 << shift) == small page size */
	unsigned int block_mask;	/* nr_blocks - 1		*/
	unsigned int all_used;		/* (1 << nr_blocks) - 1		*/
};
static struct order orders[] = {
#if PAGE_SIZE == 32768
	{ LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
	{ LIST_HEAD_INIT(orders[1].queue), 8191, 13,  3, 0x0000000f }
#else
#error unsupported page size (ARGH!)
#endif
};
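
/*
 * Worked check of the table above (commentary added here; the values
 * follow directly from the struct order field definitions):
 *
 *   orders[0]: 2K blocks.  shift = 11, mask = (1 << 11) - 1 = 2047;
 *              32768 / 2048 = 16 blocks per page, so block_mask = 15
 *              and all_used = (1 << 16) - 1 = 0x0000ffff.
 *   orders[1]: 8K blocks.  shift = 13, mask = (1 << 13) - 1 = 8191;
 *              32768 / 8192 = 4 blocks per page, so block_mask = 3
 *              and all_used = (1 << 4) - 1 = 0x0000000f.
 */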
#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

static DEFINE_SPINLOCK(small_page_lock);
static unsigned long __get_small_page(int priority, struct order *order)
{
	unsigned long flags;
	struct page *page;
	int offset;

	do {
		spin_lock_irqsave(&small_page_lock, flags);

		if (list_empty(&order->queue))
			goto need_new_page;

		page = list_entry(order->queue.next, struct page, lru);
again:
#ifdef PEDANTIC
		BUG_ON(USED_MAP(page) & ~order->all_used);
#endif
		/*
		 * Find the first free block, mark it used, and take the
		 * page off the queue once every block is in use.
		 */
		offset = ffz(USED_MAP(page));
		SET_USED(page, offset);
		if (USED_MAP(page) == order->all_used)
			list_del_init(&page->lru);
		spin_unlock_irqrestore(&small_page_lock, flags);

		return (unsigned long) page_address(page) + (offset << order->shift);

need_new_page:
		/*
		 * Drop the lock to allocate (alloc_page may sleep), then
		 * retake it and re-check the queue: another CPU may have
		 * added a page in the meantime.
		 */
		spin_unlock_irqrestore(&small_page_lock, flags);
		page = alloc_page(priority);
		spin_lock_irqsave(&small_page_lock, flags);

		if (list_empty(&order->queue)) {
			if (!page)
				goto no_page;
			SetPageReserved(page);
			USED_MAP(page) = 0;
			list_add(&page->lru, &order->queue);
			goto again;
		}

		/*
		 * Someone else refilled the queue; release our spare page
		 * (if the allocation succeeded at all) and retry.
		 */
		spin_unlock_irqrestore(&small_page_lock, flags);
		if (page)
			__free_page(page);
	} while (1);

no_page:
	spin_unlock_irqrestore(&small_page_lock, flags);
	return 0;
}
static void __free_small_page(unsigned long spage, struct order *order)
{
	unsigned long flags;
	struct page *page;

	if (virt_addr_valid(spage)) {
		page = virt_to_page(spage);

		/*
		 * The container-page must be marked Reserved
		 */
		if (!PageReserved(page) || spage & order->mask)
			goto non_small;

#ifdef PEDANTIC
		BUG_ON(USED_MAP(page) & ~order->all_used);
#endif
		/* Convert the address into a block number within the page. */
		spage = spage >> order->shift;
		spage &= order->block_mask;

		/*
		 * the following must be atomic wrt get_page
		 */
		spin_lock_irqsave(&small_page_lock, flags);

		/*
		 * A fully-used page is off the queue; clearing a bit makes
		 * it allocatable again, so put it back first.
		 */
		if (USED_MAP(page) == order->all_used)
			list_add(&page->lru, &order->queue);

		if (!TEST_AND_CLEAR_USED(page, spage))
			goto already_free;

		if (USED_MAP(page) == 0)
			goto free_page;

		spin_unlock_irqrestore(&small_page_lock, flags);
	}
	return;

free_page:
	/*
	 * unlink the page from the small page queue and free it
	 */
	list_del_init(&page->lru);
	spin_unlock_irqrestore(&small_page_lock, flags);
	ClearPageReserved(page);
	__free_page(page);
	return;

non_small:
	printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
	return;
already_free:
	printk("Trying to free free small page from %p\n", __builtin_return_address(0));
}
unsigned long get_page_8k(int priority)
{
	return __get_small_page(priority, orders + 1);
}

void free_page_8k(unsigned long spage)
{
	__free_small_page(spage, orders + 1);
}
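
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller needing a naturally aligned 8K block - e.g. an ARM26 second
 * level page table - might wrap the exported interface as below.
 * 'alloc_8k_table' and 'free_8k_table' are hypothetical names invented
 * for this example.
 */
#if 0
static unsigned long alloc_8k_table(void)
{
	/* GFP_KERNEL may sleep; the returned block is 8K-aligned. */
	unsigned long blk = get_page_8k(GFP_KERNEL);

	if (blk)
		memset((void *)blk, 0, 8192);
	return blk;		/* 0 on allocation failure */
}

static void free_8k_table(unsigned long blk)
{
	if (blk)
		free_page_8k(blk);
}
#endif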