/*
 * arch/s390/mm/page-states.c
 *
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
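
/*
 * Operation codes for the ESSA instruction (Extract and Set Storage
 * Attributes, opcode 0xb9ab), which reports the state of a page to the
 * hypervisor: "stable" content must be preserved, "unused" content may
 * be discarded by the host.
 */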
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2
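
/* Non-zero when CMMA (Collaborative Memory Management Assist) is enabled. */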
static int cmma_flag;
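
/*
 * Parse the "cmma=" kernel command line parameter:
 * "yes"/"on" enable guest page hinting, "no"/"off" disable it.
 */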
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}

__setup("cmma=", cmma);
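
/*
 * Probe for ESSA by executing it once under an exception table entry.
 * If the instruction is unavailable, the resulting program check skips
 * the "la" that clears rc, so rc stays -EOPNOTSUPP and page hinting is
 * turned off.
 */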
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}
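
/*
 * Called by the page allocator when pages are freed: mark each 4K page
 * in the 2^order block as unused so the hypervisor may reclaim its frame.
 */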
void arch_free_page(struct page *page, int order)
{
	int i, rc;

	if (!cmma_flag)
		return;
	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
			       "i" (ESSA_SET_UNUSED));
}
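
/*
 * Called by the page allocator when pages are handed out: mark them
 * stable again before the guest starts using them.
 */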
void arch_alloc_page(struct page *page, int order)
{
	int i, rc;

	if (!cmma_flag)
		return;
	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
			       "i" (ESSA_SET_STABLE));
}