/* cplbmgr.c — Blackfin CPLB (Cacheability Protection Lookaside Buffer) manager */
  1. /*
  2. * Blackfin CPLB exception handling.
  3. * Copyright 2004-2007 Analog Devices Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, see the file COPYING, or write
  17. * to the Free Software Foundation, Inc.,
  18. * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <linux/module.h>
  21. #include <linux/mm.h>
  22. #include <asm/blackfin.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/cplb.h>
  25. #include <asm/cplbinit.h>
  26. #include <asm/mmu_context.h>
  27. /*
  28. * WARNING
  29. *
  30. * This file is compiled with certain -ffixed-reg options. We have to
  31. * make sure not to call any functions here that could clobber these
  32. * registers.
  33. */
  34. int page_mask_nelts;
  35. int page_mask_order;
  36. unsigned long *current_rwx_mask[NR_CPUS];
  37. int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
  38. int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
  39. int nr_cplb_flush[NR_CPUS];
  40. /*
  41. * Given the contents of the status register, return the index of the
  42. * CPLB that caused the fault.
  43. */
  44. static inline int faulting_cplb_index(int status)
  45. {
  46. int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
  47. return 30 - signbits;
  48. }
  49. /*
  50. * Given the contents of the status register and the DCPLB_DATA contents,
  51. * return true if a write access should be permitted.
  52. */
  53. static inline int write_permitted(int status, unsigned long data)
  54. {
  55. if (status & FAULT_USERSUPV)
  56. return !!(data & CPLB_SUPV_WR);
  57. else
  58. return !!(data & CPLB_USER_WR);
  59. }
  60. /* Counters to implement round-robin replacement. */
  61. static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  62. /*
  63. * Find an ICPLB entry to be evicted and return its index.
  64. */
  65. static int evict_one_icplb(unsigned int cpu)
  66. {
  67. int i;
  68. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  69. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  70. return i;
  71. i = first_switched_icplb + icplb_rr_index[cpu];
  72. if (i >= MAX_CPLBS) {
  73. i -= MAX_CPLBS - first_switched_icplb;
  74. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  75. }
  76. icplb_rr_index[cpu]++;
  77. return i;
  78. }
  79. static int evict_one_dcplb(unsigned int cpu)
  80. {
  81. int i;
  82. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  83. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  84. return i;
  85. i = first_switched_dcplb + dcplb_rr_index[cpu];
  86. if (i >= MAX_CPLBS) {
  87. i -= MAX_CPLBS - first_switched_dcplb;
  88. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  89. }
  90. dcplb_rr_index[cpu]++;
  91. return i;
  92. }
  93. static noinline int dcplb_miss(unsigned int cpu)
  94. {
  95. unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
  96. int status = bfin_read_DCPLB_STATUS();
  97. unsigned long *mask;
  98. int idx;
  99. unsigned long d_data;
  100. nr_dcplb_miss[cpu]++;
  101. d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
  102. #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
  103. if (bfin_addr_dcacheable(addr)) {
  104. d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
  105. # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
  106. d_data |= CPLB_L1_AOW | CPLB_WT;
  107. # endif
  108. }
  109. #endif
  110. if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
  111. addr = L2_START;
  112. d_data = L2_DMEMORY;
  113. } else if (addr >= physical_mem_end) {
  114. if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
  115. && (status & FAULT_USERSUPV)) {
  116. addr &= ~0x3fffff;
  117. d_data &= ~PAGE_SIZE_4KB;
  118. d_data |= PAGE_SIZE_4MB;
  119. } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
  120. && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
  121. addr &= ~(1 * 1024 * 1024 - 1);
  122. d_data &= ~PAGE_SIZE_4KB;
  123. d_data |= PAGE_SIZE_1MB;
  124. } else
  125. return CPLB_PROT_VIOL;
  126. } else if (addr >= _ramend) {
  127. d_data |= CPLB_USER_RD | CPLB_USER_WR;
  128. } else {
  129. mask = current_rwx_mask[cpu];
  130. if (mask) {
  131. int page = addr >> PAGE_SHIFT;
  132. int idx = page >> 5;
  133. int bit = 1 << (page & 31);
  134. if (mask[idx] & bit)
  135. d_data |= CPLB_USER_RD;
  136. mask += page_mask_nelts;
  137. if (mask[idx] & bit)
  138. d_data |= CPLB_USER_WR;
  139. }
  140. }
  141. idx = evict_one_dcplb(cpu);
  142. addr &= PAGE_MASK;
  143. dcplb_tbl[cpu][idx].addr = addr;
  144. dcplb_tbl[cpu][idx].data = d_data;
  145. _disable_dcplb();
  146. bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
  147. bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
  148. _enable_dcplb();
  149. return 0;
  150. }
  151. static noinline int icplb_miss(unsigned int cpu)
  152. {
  153. unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
  154. int status = bfin_read_ICPLB_STATUS();
  155. int idx;
  156. unsigned long i_data;
  157. nr_icplb_miss[cpu]++;
  158. /* If inside the uncached DMA region, fault. */
  159. if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
  160. return CPLB_PROT_VIOL;
  161. if (status & FAULT_USERSUPV)
  162. nr_icplb_supv_miss[cpu]++;
  163. /*
  164. * First, try to find a CPLB that matches this address. If we
  165. * find one, then the fact that we're in the miss handler means
  166. * that the instruction crosses a page boundary.
  167. */
  168. for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
  169. if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
  170. unsigned long this_addr = icplb_tbl[cpu][idx].addr;
  171. if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
  172. addr += PAGE_SIZE;
  173. break;
  174. }
  175. }
  176. }
  177. i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
  178. #ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
  179. /*
  180. * Normal RAM, and possibly the reserved memory area, are
  181. * cacheable.
  182. */
  183. if (addr < _ramend ||
  184. (addr < physical_mem_end && reserved_mem_icache_on))
  185. i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
  186. #endif
  187. if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
  188. addr = L2_START;
  189. i_data = L2_IMEMORY;
  190. } else if (addr >= physical_mem_end) {
  191. if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
  192. && (status & FAULT_USERSUPV)) {
  193. addr &= ~(1 * 1024 * 1024 - 1);
  194. i_data &= ~PAGE_SIZE_4KB;
  195. i_data |= PAGE_SIZE_1MB;
  196. } else
  197. return CPLB_PROT_VIOL;
  198. } else if (addr >= _ramend) {
  199. i_data |= CPLB_USER_RD;
  200. } else {
  201. /*
  202. * Two cases to distinguish - a supervisor access must
  203. * necessarily be for a module page; we grant it
  204. * unconditionally (could do better here in the future).
  205. * Otherwise, check the x bitmap of the current process.
  206. */
  207. if (!(status & FAULT_USERSUPV)) {
  208. unsigned long *mask = current_rwx_mask[cpu];
  209. if (mask) {
  210. int page = addr >> PAGE_SHIFT;
  211. int idx = page >> 5;
  212. int bit = 1 << (page & 31);
  213. mask += 2 * page_mask_nelts;
  214. if (mask[idx] & bit)
  215. i_data |= CPLB_USER_RD;
  216. }
  217. }
  218. }
  219. idx = evict_one_icplb(cpu);
  220. addr &= PAGE_MASK;
  221. icplb_tbl[cpu][idx].addr = addr;
  222. icplb_tbl[cpu][idx].data = i_data;
  223. _disable_icplb();
  224. bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
  225. bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
  226. _enable_icplb();
  227. return 0;
  228. }
  229. static noinline int dcplb_protection_fault(unsigned int cpu)
  230. {
  231. int status = bfin_read_DCPLB_STATUS();
  232. nr_dcplb_prot[cpu]++;
  233. if (status & FAULT_RW) {
  234. int idx = faulting_cplb_index(status);
  235. unsigned long data = dcplb_tbl[cpu][idx].data;
  236. if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
  237. write_permitted(status, data)) {
  238. data |= CPLB_DIRTY;
  239. dcplb_tbl[cpu][idx].data = data;
  240. bfin_write32(DCPLB_DATA0 + idx * 4, data);
  241. return 0;
  242. }
  243. }
  244. return CPLB_PROT_VIOL;
  245. }
  246. int cplb_hdr(int seqstat, struct pt_regs *regs)
  247. {
  248. int cause = seqstat & 0x3f;
  249. unsigned int cpu = raw_smp_processor_id();
  250. switch (cause) {
  251. case 0x23:
  252. return dcplb_protection_fault(cpu);
  253. case 0x2C:
  254. return icplb_miss(cpu);
  255. case 0x26:
  256. return dcplb_miss(cpu);
  257. default:
  258. return 1;
  259. }
  260. }
  261. void flush_switched_cplbs(unsigned int cpu)
  262. {
  263. int i;
  264. unsigned long flags;
  265. nr_cplb_flush[cpu]++;
  266. local_irq_save_hw(flags);
  267. _disable_icplb();
  268. for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
  269. icplb_tbl[cpu][i].data = 0;
  270. bfin_write32(ICPLB_DATA0 + i * 4, 0);
  271. }
  272. _enable_icplb();
  273. _disable_dcplb();
  274. for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
  275. dcplb_tbl[cpu][i].data = 0;
  276. bfin_write32(DCPLB_DATA0 + i * 4, 0);
  277. }
  278. _enable_dcplb();
  279. local_irq_restore_hw(flags);
  280. }
  281. void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
  282. {
  283. int i;
  284. unsigned long addr = (unsigned long)masks;
  285. unsigned long d_data;
  286. unsigned long flags;
  287. if (!masks) {
  288. current_rwx_mask[cpu] = masks;
  289. return;
  290. }
  291. local_irq_save_hw(flags);
  292. current_rwx_mask[cpu] = masks;
  293. if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
  294. addr = L2_START;
  295. d_data = L2_DMEMORY;
  296. } else {
  297. d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
  298. #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
  299. d_data |= CPLB_L1_CHBL;
  300. # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
  301. d_data |= CPLB_L1_AOW | CPLB_WT;
  302. # endif
  303. #endif
  304. }
  305. _disable_dcplb();
  306. for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
  307. dcplb_tbl[cpu][i].addr = addr;
  308. dcplb_tbl[cpu][i].data = d_data;
  309. bfin_write32(DCPLB_DATA0 + i * 4, d_data);
  310. bfin_write32(DCPLB_ADDR0 + i * 4, addr);
  311. addr += PAGE_SIZE;
  312. }
  313. _enable_dcplb();
  314. local_irq_restore_hw(flags);
  315. }