/*
 * blockops.c — probe which ARMv6 "block" cache maintenance operations
 * the CPU implements, and hook the supported ones into cpu_cache.
 */
  1. #include <linux/kernel.h>
  2. #include <linux/init.h>
  3. #include <linux/errno.h>
  4. #include <linux/mm.h>
  5. #include <asm/memory.h>
  6. #include <asm/ptrace.h>
  7. #include <asm/cacheflush.h>
  8. #include <asm/traps.h>
  9. extern struct cpu_cache_fns blk_cache_fns;
  10. #define HARVARD_CACHE
/*
 * blk_flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr - kernel address (guaranteed to be page aligned)
 */
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
	/*
	 * Naked function: the compiler emits no prologue/epilogue, so the
	 * asm below is the entire body and returns via "mov pc, lr" itself.
	 *
	 * r1 = kaddr + PAGE_SIZE - L1_CACHE_BYTES, i.e. the address of the
	 * last cache line in the page.  The ".word" emits the raw opcode
	 * for the V6 blocking clean+invalidate D-cache range op
	 * (mcrr p15, 0, r0, r1, c14, 0) — presumably hand-encoded because
	 * the assembler in use does not accept these block ops.  The whole
	 * I-cache is then invalidated (c7, c5, 0) and the write buffer
	 * drained (c7, c10, 4).
	 */
	asm(
	"add r1, r0, %0 \n\
sub r1, r1, %1 \n\
1: .word 0xec401f0e @ mcrr p15, 0, r0, r1, c14, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c5, 0 \n\
mcr p15, 0, r0, c7, c10, 4 \n\
mov pc, lr"
	:
	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
}
/*
 * blk_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
	/*
	 * Unified-cache variant.  If start (r0) or end (r1) is not
	 * cache-line aligned, the partially covered line is cleaned
	 * (written back) first, so the block invalidate that follows
	 * cannot discard dirty data belonging to neighbouring buffers
	 * that share the line.  The ".word" is the raw opcode for the
	 * V6 blocking invalidate-range op (mcrr p15, 0, r1, r0, c6, 0).
	 */
	asm(
	"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line\n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}
/*
 * Harvard-cache variant of blk_dma_inv_range(): identical structure to
 * the unified version, but uses the D-side line operations (c7,c10,1
 * clean / c7,c14,1 clean+invalidate) for the unaligned boundary lines.
 */
static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
	/*
	 * Clean any partially covered line at either end of [r0, r1)
	 * before the blocking block invalidate (raw opcode for
	 * mcrr p15, 0, r1, r0, c6, 0), then drain the write buffer.
	 */
	asm(
	"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c10, 1 @ clean D line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line \n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}
/*
 * blk_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * Write back (clean) the region using the V6 blocking
	 * clean-range op, hand-encoded as ".word"
	 * (mcrr p15, 0, r1, r0, c12, 0), then drain the write buffer
	 * so the data is visible to the DMA device.
	 */
	asm(
	".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr");
}
/*
 * blk_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean+invalidate the region with the V6 blocking op, encoded
	 * as ".word" (mcrr p15, 0, r1, r0, c14, 0).
	 *
	 * NOTE(review): unlike blk_dma_clean_range() there is no
	 * drain-write-buffer (c7, c10, 4) after the blocking op here —
	 * possibly intentional if the blocking c14 op drains itself,
	 * but worth confirming against the CPU's TRM.
	 */
	asm(
	".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0 @ blocking \n\
mov pc, lr");
}
  99. static int blockops_trap(struct pt_regs *regs, unsigned int instr)
  100. {
  101. regs->ARM_r4 |= regs->ARM_r2;
  102. regs->ARM_pc += 4;
  103. return 0;
  104. }
  105. static char *func[] = {
  106. "Prefetch data range",
  107. "Clean+Invalidate data range",
  108. "Clean data range",
  109. "Invalidate data range",
  110. "Invalidate instr range"
  111. };
/*
 * Undef hook matching the five raw mcrr probe words used in
 * blockops_check().  With the condition field and the distinguishing
 * low opcode bits masked off (instr_mask 0x0fffffd0), each probe word
 * (0xec401fxx) reduces to instr_val 0x0c401f00.  cpsr_mask/val restrict
 * the hook to ARM state (Thumb bit clear).  __initdata: the hook is
 * only registered during init and discarded afterwards.
 */
static struct undef_hook blockops_hook __initdata = {
	.instr_mask = 0x0fffffd0,
	.instr_val = 0x0c401f00,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = blockops_trap,
};
/*
 * Probe which of the five V6 block cache maintenance operations this
 * CPU implements.  Each candidate mcrr is executed with a unique flag
 * value preloaded into r2; if the CPU traps the opcode as undefined,
 * blockops_trap() ORs that flag into r4 ("err" below) and skips the
 * instruction.  Every op that did NOT fault is then installed into
 * cpu_cache so the kernel uses the faster block form.
 */
static int __init blockops_check(void)
{
	/*
	 * err is pinned to r4 so blockops_trap() can update it through
	 * pt_regs while the probe asm runs.
	 * NOTE(review): r4 appears in no clobber/output list of the asm
	 * below — this relies on the compiler keeping err live in r4
	 * across the asm statement; fragile, verify with the toolchain.
	 */
	register unsigned int err asm("r4") = 0;
	unsigned int err_pos = 1;
	unsigned int cache_type;
	int i;
	/* Read the Cache Type Register; bit 24 is tested below to pick
	 * the harvard vs unified invalidate variant. */
	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
	printk("Checking V6 block cache operations:\n");
	register_undef_hook(&blockops_hook);
	/*
	 * Execute each raw block-op opcode against a 128-byte region at
	 * PAGE_OFFSET.  Before each opcode, r2 is set to the failure bit
	 * that blockops_trap() will record if the opcode is undefined:
	 * 1=prefetch, 2=clean+inv, 4=clean, 8=invalidate, 16=inv instr.
	 */
	__asm__ ("mov r0, %0\n\t"
	"mov r1, %1\n\t"
	"mov r2, #1\n\t"
	".word 0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
	"mov r2, #2\n\t"
	".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
	"mov r2, #4\n\t"
	".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
	"mov r2, #8\n\t"
	".word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
	"mov r2, #16\n\t"
	".word 0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
	:
	: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
	: "r0", "r1", "r2");
	unregister_undef_hook(&blockops_hook);
	/* Report support per op; err_pos walks the same bit order as the
	 * probes above and the func[] name table. */
	for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
		printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
	/* Block invalidate (bit 8) worked: pick the variant matching the
	 * cache organisation (CTR bit 24 set => harvard). */
	if ((err & 8) == 0) {
		printk(" --> Using %s block cache invalidate\n",
		cache_type & (1 << 24) ? "harvard" : "unified");
		if (cache_type & (1 << 24))
			cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
		else
			cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
	}
	/* Block clean (bit 4) worked. */
	if ((err & 4) == 0) {
		printk(" --> Using block cache clean\n");
		cpu_cache.dma_clean_range = blk_dma_clean_range;
	}
	/* Block clean+invalidate (bit 2) worked. */
	if ((err & 2) == 0) {
		printk(" --> Using block cache clean+invalidate\n");
		cpu_cache.dma_flush_range = blk_dma_flush_range;
		cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
	}
	return 0;
}
/* Run the probe once during boot-time initcall processing. */
__initcall(blockops_check);