maccess.c
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/system.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	/* icm mask selecting which bytes of the aligned word to replace */
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of icm below */
		"	icm	0,0,0(%3)\n"	/* template, executed via ex */
		"0:	l	0,0(%1)\n"	/* load aligned word */
		"	lra	%1,0(%1)\n"	/* virtual -> real address */
		"1:	ex	%2,0(1)\n"	/* icm 0,mask,0(src) */
		"2:	stura	0,%1\n"		/* store word to real address */
		"	la	%0,0\n"		/* success: rc = 0 */
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}
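
/*
 * Worked example (added for illustration): for (dst & 3) == 1 and
 * size >= 3, probe_kernel_write_odd() stores three bytes in one shot:
 * count = min(4 - 1, size) = 3, mask = ((0xf << 1) & 0xf) >> 1 = 0x7,
 * so the executed icm merges bytes 1-3 of the aligned word from src
 * before stura writes the word back.
 */
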
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
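
/*
 * Illustrative sketch (not part of the original file): a caller could
 * use probe_kernel_write() to patch a word of write-protected kernel
 * memory; the wrapper name below is hypothetical.
 */
static int __maybe_unused example_patch_kernel_word(void *addr,
						    unsigned int val)
{
	/* returns 0 on success or -EFAULT if any byte store faults */
	return probe_kernel_write(addr, &val, sizeof(val));
}
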
/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	unsigned long flags;
	int rc = -EFAULT;

	if (!count)
		return 0;
	/* disable DAT and interrupts while copying by real address */
	flags = __arch_local_irq_stnsm(0xf8UL);
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"		/* cc == 3: not done, retry */
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	arch_local_irq_restore(flags);
	return rc;
}
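
/*
 * Illustrative sketch (not part of the original file): reading from an
 * absolute address that need not be mapped in the kernel address
 * space; "real_addr" is a hypothetical caller-supplied address.
 */
static int __maybe_unused example_read_real(void *buf,
					    unsigned long real_addr,
					    size_t len)
{
	return memcpy_real(buf, (void *) real_addr, len);
}
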
/*
 * Copy memory to absolute zero
 */
void copy_to_absolute_zero(void *dest, void *src, size_t count)
{
	unsigned long cr0;

	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
	preempt_disable();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	/* with prefixing, real address "dest + prefix" is absolute "dest" */
	memcpy_real(dest + store_prefix(), src, count);
	__ctl_load(cr0, 0, 0);
	preempt_enable();
}
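
/*
 * Illustrative sketch (not part of the original file): publishing a new
 * restart PSW in the lowcore copy at absolute address zero, mirroring
 * how callers pass a lowcore offset via &S390_lowcore.<field>; assumes
 * asm/lowcore.h is reachable through the existing includes.
 */
static void __maybe_unused example_set_restart_psw(psw_t psw)
{
	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}
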
/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	/* bounce page: memcpy_real cannot target user memory directly */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
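
/*
 * Illustrative sketch (not part of the original file): how a dump
 * interface might hand a range of real memory to user space; the
 * function name is hypothetical.
 */
static int __maybe_unused example_dump_to_user(void __user *ubuf,
					       unsigned long real_addr,
					       size_t len)
{
	return copy_to_user_real(ubuf, (void *) real_addr, len);
}
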
/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (copy_from_user(buf, src + offs, size))
			goto out;
		if (memcpy_real(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
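
/*
 * Illustrative sketch (not part of the original file): the inverse
 * direction, letting user space fill a buffer that lives in real
 * memory; the function name is hypothetical.
 */
static int __maybe_unused example_fill_from_user(unsigned long real_addr,
						 void __user *ubuf,
						 size_t len)
{
	return copy_from_user_real((void *) real_addr, ubuf, len);
}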