/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	/*
	 * Example: dst at offset 1 with size >= 3 gives count = 3 and
	 * mask = ((0xf << 1) & 0xf) >> 1 = 0x7, so the executed icm
	 * inserts bytes 1-3 from src into the word loaded from
	 * "aligned" before stura stores the whole word back.
	 */
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"
		"	icm	0,0,0(%3)\n"
		"0:	l	0,0(%1)\n"
		"	lra	%1,0(%1)\n"
		"1:	ex	%2,0(1)\n"
		"2:	stura	0,%1\n"
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
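
/*
 * Usage sketch (illustrative, not part of the original file): patching a
 * halfword of write-protected kernel text via probe_kernel_write().  The
 * helper name and patched value are hypothetical; 0x0700 is the s390
 * "bcr 0,0" no-op.
 */
#if 0	/* example only */
static void example_patch_nop(void *insn)
{
	u16 nop = 0x0700;	/* hypothetical patch value: bcr 0,0 */

	if (probe_kernel_write(insn, &nop, sizeof(nop)) < 0)
		pr_warn("patching %p failed\n", insn);
}
#endif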

/*
 * Low-level copy without DAT; mvcle requires even/odd register pairs,
 * hence the explicit register variables 2/3 (destination) and 4/5 (source).
 */
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);	/* disable DAT */
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}
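
/*
 * Usage sketch (illustrative only): since memcpy_real() runs with DAT
 * switched off, its source and destination are interpreted as real
 * addresses.  A hypothetical caller fetching from a real address:
 */
#if 0	/* example only */
static int example_read_real(void *dst, unsigned long real_addr, size_t len)
{
	return memcpy_real(dst, (void *) real_addr, len);
}
#endif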

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
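
/*
 * Usage sketch (illustrative only): while memcpy_absolute() runs with the
 * prefix register reset to zero, accesses to the prefix area reach the
 * absolute zero page instead of this CPU's prefixed lowcore.  A hypothetical
 * caller updating a field in absolute lowcore; the restart_stack field is
 * only an example.
 */
#if 0	/* example only */
static void example_set_abs_restart_stack(unsigned long stack)
{
	memcpy_absolute(&S390_lowcore.restart_stack, &stack, sizeof(stack));
}
#endif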

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (copy_from_user(buf, src + offs, size))
			goto out;
		if (memcpy_real(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
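
/*
 * Note on the two helpers above: copy_{to,from}_user() must run with DAT
 * enabled while memcpy_real() disables it, so the data is bounced through
 * an intermediate kernel page one PAGE_SIZE chunk at a time.  A hypothetical
 * caller exposing real memory to user space:
 */
#if 0	/* example only */
static int example_read_real_to_user(void __user *ubuf,
				     unsigned long real_addr, size_t len)
{
	return copy_to_user_real(ubuf, (void *) real_addr, len);
}
#endif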

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
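
/*
 * Usage sketch (illustrative only): how a /dev/mem style read path would
 * pair the two functions above.  Names and error handling are hypothetical.
 */
#if 0	/* example only */
static long example_dev_mem_read(unsigned long addr, char __user *ubuf,
				 size_t len)
{
	void *ptr = xlate_dev_mem_ptr(addr);
	long rc = 0;

	if (!ptr)
		return -ENOMEM;	/* bounce page allocation failed */
	if (copy_to_user(ubuf, ptr, len))
		rc = -EFAULT;
	unxlate_dev_mem_ptr(addr, ptr);
	return rc;
}
#endif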