/* arch/sh/mm/flush-sh4.c -- SH-4 D-cache region flush primitives */
  1. #include <linux/mm.h>
  2. #include <asm/mmu_context.h>
  3. #include <asm/cacheflush.h>
  4. /*
  5. * Write back the dirty D-caches, but not invalidate them.
  6. *
  7. * START: Virtual Address (U0, P1, or P3)
  8. * SIZE: Size of the region.
  9. */
  10. void __weak __flush_wback_region(void *start, int size)
  11. {
  12. reg_size_t aligned_start, v, cnt, end;
  13. aligned_start = register_align(start);
  14. v = aligned_start & ~(L1_CACHE_BYTES-1);
  15. end = (aligned_start + size + L1_CACHE_BYTES-1)
  16. & ~(L1_CACHE_BYTES-1);
  17. cnt = (end - v) / L1_CACHE_BYTES;
  18. while (cnt >= 8) {
  19. asm volatile("ocbwb @%0" : : "r" (v));
  20. v += L1_CACHE_BYTES;
  21. asm volatile("ocbwb @%0" : : "r" (v));
  22. v += L1_CACHE_BYTES;
  23. asm volatile("ocbwb @%0" : : "r" (v));
  24. v += L1_CACHE_BYTES;
  25. asm volatile("ocbwb @%0" : : "r" (v));
  26. v += L1_CACHE_BYTES;
  27. asm volatile("ocbwb @%0" : : "r" (v));
  28. v += L1_CACHE_BYTES;
  29. asm volatile("ocbwb @%0" : : "r" (v));
  30. v += L1_CACHE_BYTES;
  31. asm volatile("ocbwb @%0" : : "r" (v));
  32. v += L1_CACHE_BYTES;
  33. asm volatile("ocbwb @%0" : : "r" (v));
  34. v += L1_CACHE_BYTES;
  35. cnt -= 8;
  36. }
  37. while (cnt) {
  38. asm volatile("ocbwb @%0" : : "r" (v));
  39. v += L1_CACHE_BYTES;
  40. cnt--;
  41. }
  42. }
  43. /*
  44. * Write back the dirty D-caches and invalidate them.
  45. *
  46. * START: Virtual Address (U0, P1, or P3)
  47. * SIZE: Size of the region.
  48. */
  49. void __weak __flush_purge_region(void *start, int size)
  50. {
  51. reg_size_t aligned_start, v, cnt, end;
  52. aligned_start = register_align(start);
  53. v = aligned_start & ~(L1_CACHE_BYTES-1);
  54. end = (aligned_start + size + L1_CACHE_BYTES-1)
  55. & ~(L1_CACHE_BYTES-1);
  56. cnt = (end - v) / L1_CACHE_BYTES;
  57. while (cnt >= 8) {
  58. asm volatile("ocbp @%0" : : "r" (v));
  59. v += L1_CACHE_BYTES;
  60. asm volatile("ocbp @%0" : : "r" (v));
  61. v += L1_CACHE_BYTES;
  62. asm volatile("ocbp @%0" : : "r" (v));
  63. v += L1_CACHE_BYTES;
  64. asm volatile("ocbp @%0" : : "r" (v));
  65. v += L1_CACHE_BYTES;
  66. asm volatile("ocbp @%0" : : "r" (v));
  67. v += L1_CACHE_BYTES;
  68. asm volatile("ocbp @%0" : : "r" (v));
  69. v += L1_CACHE_BYTES;
  70. asm volatile("ocbp @%0" : : "r" (v));
  71. v += L1_CACHE_BYTES;
  72. asm volatile("ocbp @%0" : : "r" (v));
  73. v += L1_CACHE_BYTES;
  74. cnt -= 8;
  75. }
  76. while (cnt) {
  77. asm volatile("ocbp @%0" : : "r" (v));
  78. v += L1_CACHE_BYTES;
  79. cnt--;
  80. }
  81. }
  82. /*
  83. * No write back please
  84. */
  85. void __weak __flush_invalidate_region(void *start, int size)
  86. {
  87. reg_size_t aligned_start, v, cnt, end;
  88. aligned_start = register_align(start);
  89. v = aligned_start & ~(L1_CACHE_BYTES-1);
  90. end = (aligned_start + size + L1_CACHE_BYTES-1)
  91. & ~(L1_CACHE_BYTES-1);
  92. cnt = (end - v) / L1_CACHE_BYTES;
  93. while (cnt >= 8) {
  94. asm volatile("ocbi @%0" : : "r" (v));
  95. v += L1_CACHE_BYTES;
  96. asm volatile("ocbi @%0" : : "r" (v));
  97. v += L1_CACHE_BYTES;
  98. asm volatile("ocbi @%0" : : "r" (v));
  99. v += L1_CACHE_BYTES;
  100. asm volatile("ocbi @%0" : : "r" (v));
  101. v += L1_CACHE_BYTES;
  102. asm volatile("ocbi @%0" : : "r" (v));
  103. v += L1_CACHE_BYTES;
  104. asm volatile("ocbi @%0" : : "r" (v));
  105. v += L1_CACHE_BYTES;
  106. asm volatile("ocbi @%0" : : "r" (v));
  107. v += L1_CACHE_BYTES;
  108. asm volatile("ocbi @%0" : : "r" (v));
  109. v += L1_CACHE_BYTES;
  110. cnt -= 8;
  111. }
  112. while (cnt) {
  113. asm volatile("ocbi @%0" : : "r" (v));
  114. v += L1_CACHE_BYTES;
  115. cnt--;
  116. }
  117. }