/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>

/*
 * Operation codes for the ESSA instruction (opcode 0xb9ab, used below):
 * set the page state the host sees for a guest page.
 */
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2

/*
 * Non-zero if CMMA page hinting is enabled. Defaults to on; can be
 * overridden with the "cmma=" kernel parameter and is cleared by
 * cmma_init() if the machine lacks the instruction.
 */
static int cmma_flag = 1;
  16. static int __init cmma(char *str)
  17. {
  18. char *parm;
  19. parm = strstrip(str);
  20. if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
  21. cmma_flag = 1;
  22. return 1;
  23. }
  24. cmma_flag = 0;
  25. if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
  26. return 1;
  27. return 0;
  28. }
  29. __setup("cmma=", cmma);
/*
 * Probe for the ESSA instruction at boot. The instruction is executed
 * once; if it traps, the EX_TABLE fixup jumps from 0: to 1:, skipping
 * the "la %0,0" that would clear rc, so rc keeps -EOPNOTSUPP and CMMA
 * is disabled. Registers 0/1 are fixed by the asm constraints.
 */
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;	/* machine has no ESSA: turn hinting off */
}
/*
 * Mark all 2^order pages starting at @page as unused: issue ESSA
 * (opcode 0xb9ab) with ESSA_SET_UNUSED on each page's physical
 * address, hinting to the host that their content need not be kept.
 */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
  54. void arch_free_page(struct page *page, int order)
  55. {
  56. if (!cmma_flag)
  57. return;
  58. set_page_unstable(page, order);
  59. }
/*
 * Mark all 2^order pages starting at @page as stable: issue ESSA
 * (opcode 0xb9ab) with ESSA_SET_STABLE on each page's physical
 * address, so the host must preserve their content again.
 */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
  69. void arch_alloc_page(struct page *page, int order)
  70. {
  71. if (!cmma_flag)
  72. return;
  73. set_page_stable(page, order);
  74. }
  75. void arch_set_page_states(int make_stable)
  76. {
  77. unsigned long flags, order, t;
  78. struct list_head *l;
  79. struct page *page;
  80. struct zone *zone;
  81. if (!cmma_flag)
  82. return;
  83. if (make_stable)
  84. drain_local_pages(NULL);
  85. for_each_populated_zone(zone) {
  86. spin_lock_irqsave(&zone->lock, flags);
  87. for_each_migratetype_order(order, t) {
  88. list_for_each(l, &zone->free_area[order].free_list[t]) {
  89. page = list_entry(l, struct page, lru);
  90. if (make_stable)
  91. set_page_stable(page, order);
  92. else
  93. set_page_unstable(page, order);
  94. }
  95. }
  96. spin_unlock_irqrestore(&zone->lock, flags);
  97. }
  98. }