/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
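	/*
	 * Read-modify-write the aligned word that contains dst:
	 * "l" loads the current word, "lra" converts its address to a
	 * real address, "ex" executes the "icm" template with the mask
	 * computed above so that only the destination bytes selected by
	 * the mask are replaced with source bytes, and "stura" stores
	 * the result via the real address. For example, offset 1 and
	 * count 2 yield mask 0x6, i.e. bytes 1 and 2 of the word are
	 * replaced. Any exception branches to label 3 and leaves rc
	 * at -EFAULT.
	 */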
	asm volatile(
		"	bras	1,0f\n"
		"	icm	0,0,0(%3)\n"
		"0:	l	0,0(%1)\n"
		"	lra	%1,0(%1)\n"
		"1:	ex	%2,0(1)\n"
		"2:	stura	0,%1\n"
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

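/*
 * Copy size bytes from src to dst in kernel memory, bypassing write
 * protection, by splitting the copy into word-sized pieces for
 * probe_kernel_write_odd(). Returns 0 on success or -EFAULT.
 */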
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}

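/*
 * Low level helper for memcpy_real(): copy with the mvcle instruction.
 * The caller is expected to have switched off DAT already.
 */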
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

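	/*
	 * mvcle works on even/odd register pairs: r2/r3 hold the
	 * destination address and length, r4/r5 the source address and
	 * length. The instruction may stop early with condition code 3
	 * after a CPU-determined amount of data, so "jo 0b" resumes it
	 * until the copy is complete; a fault is caught via the
	 * exception table and leaves rc at -EFAULT.
	 */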
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
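	/*
	 * stnsm with mask 0xfb clears the DAT bit in the PSW system mask,
	 * so the copy below runs with real addressing until the saved
	 * mask is put back by local_irq_restore().
	 */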
	__arch_local_irq_stnsm(0xfbUL);
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
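	/*
	 * Prefixing swaps the first pages of absolute storage with the
	 * prefix area, so with a non-zero prefix the copy would not reach
	 * absolute memory. Setting the prefix to 0 makes real and
	 * absolute addresses identical for the duration of the memcpy();
	 * machine checks are kept off while the CPU temporarily runs
	 * without its own lowcore.
	 */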
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
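	/*
	 * memcpy_real() copies via real addresses while copy_to_user()
	 * works on user virtual addresses, so the data is staged page by
	 * page through a kernel bounce buffer.
	 */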
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
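	/*
	 * Same page-wise bounce buffer scheme as in copy_to_user_real(),
	 * just in the opposite direction.
	 */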
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (copy_from_user(buf, src + offs, size))
			goto out;
		if (memcpy_real(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

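	/*
	 * The absolute zero page and every online CPU's lowcore are the
	 * ranges that can be affected by prefixing; report them as
	 * swapped so that callers access them via memcpy_absolute().
	 */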
	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy
 * of the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
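	/*
	 * If the address may be remapped by prefixing, hand out a bounce
	 * page filled via memcpy_absolute() instead of the original
	 * pointer, so the caller really sees absolute memory.
	 * get_online_cpus() and preempt_disable() keep the set of
	 * lowcores and the current prefix stable while checking and
	 * copying.
	 */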
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}