cplbmgr.c 8.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367
  1. /*
  2. * Blackfin CPLB exception handling for when the MPU is on
  3. *
  4. * Copyright 2008-2009 Analog Devices Inc.
  5. *
  6. * Licensed under the GPL-2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <asm/blackfin.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/cplb.h>
  13. #include <asm/cplbinit.h>
  14. #include <asm/mmu_context.h>
  15. /*
  16. * WARNING
  17. *
  18. * This file is compiled with certain -ffixed-reg options. We have to
  19. * make sure not to call any functions here that could clobber these
  20. * registers.
  21. */
/* Size, in 32-bit words, of each per-process page permission bitmap,
 * and its log2 (both set up elsewhere — not written in this file). */
int page_mask_nelts;
int page_mask_order;

/* Per-CPU pointer to the current process's page permission bitmaps.
 * Layout (grounded in the miss handlers below): three consecutive
 * bitmaps of page_mask_nelts words each — read, then write (reached
 * via "mask += page_mask_nelts"), then execute (via
 * "mask += 2 * page_mask_nelts"). */
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU CPLB event statistics, incremented by the handlers below.
 * NOTE(review): plain ints, not atomics — presumably only read for
 * reporting; confirm readers tolerate torn/racy updates. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
  28. /*
  29. * Given the contents of the status register, return the index of the
  30. * CPLB that caused the fault.
  31. */
  32. static inline int faulting_cplb_index(int status)
  33. {
  34. int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
  35. return 30 - signbits;
  36. }
  37. /*
  38. * Given the contents of the status register and the DCPLB_DATA contents,
  39. * return true if a write access should be permitted.
  40. */
  41. static inline int write_permitted(int status, unsigned long data)
  42. {
  43. if (status & FAULT_USERSUPV)
  44. return !!(data & CPLB_SUPV_WR);
  45. else
  46. return !!(data & CPLB_USER_WR);
  47. }
/* Counters to implement round-robin replacement. */
/* Per-CPU offset (relative to first_switched_icplb/first_switched_dcplb)
 * of the next switched entry to evict; advanced and wrapped by
 * evict_one_icplb()/evict_one_dcplb(). */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  50. /*
  51. * Find an ICPLB entry to be evicted and return its index.
  52. */
  53. static int evict_one_icplb(unsigned int cpu)
  54. {
  55. int i;
  56. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  57. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  58. return i;
  59. i = first_switched_icplb + icplb_rr_index[cpu];
  60. if (i >= MAX_CPLBS) {
  61. i -= MAX_CPLBS - first_switched_icplb;
  62. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  63. }
  64. icplb_rr_index[cpu]++;
  65. return i;
  66. }
  67. static int evict_one_dcplb(unsigned int cpu)
  68. {
  69. int i;
  70. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  71. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  72. return i;
  73. i = first_switched_dcplb + dcplb_rr_index[cpu];
  74. if (i >= MAX_CPLBS) {
  75. i -= MAX_CPLBS - first_switched_dcplb;
  76. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  77. }
  78. dcplb_rr_index[cpu]++;
  79. return i;
  80. }
/*
 * Handle a DCPLB miss: build a CPLB descriptor for the faulting address,
 * evict an entry, and install the new mapping in both the per-CPU shadow
 * table and the hardware registers.  Returns 0 on success or
 * CPLB_PROT_VIOL if no mapping may be created.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default descriptor: valid, dirty, supervisor-writable 4K page;
	 * cacheability bits added below when configured. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM is covered by one fixed descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async banks: grant user read only if the current
			 * process's read bitmap covers the page.  Async pages
			 * are indexed in the bitmap directly after real RAM
			 * (hence the ASYNC_BANK0_BASE - _ramend offset). */
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Supervisor read of the boot ROM: map the whole
			 * 1MB region with a single large page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Between the end of managed RAM and physical_mem_end:
		 * grant full user read/write access. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		/* Normal RAM: consult the process's read bitmap, then the
		 * write bitmap that immediately follows it. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	/* Install the descriptor: shadow table first, then the hardware
	 * registers (DCPLBs must be disabled while they are rewritten). */
	idx = evict_one_dcplb(cpu);
	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}
/*
 * Handle an ICPLB miss: build an instruction CPLB descriptor for the
 * faulting address, evict an entry, and install the mapping in the
 * shadow table and hardware.  Returns 0 on success or CPLB_PROT_VIOL.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Map the page following the matching one. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM is covered by one fixed descriptor. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async banks: user execution only if the execute
			 * bitmap (third bitmap, hence 2 * page_mask_nelts)
			 * permits the page; supervisor gets no extra bits. */
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & FAULT_USERSUPV)) {
			/* Supervisor fetch from the boot ROM: map the whole
			 * 1MB region with a single large page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	/* Install the descriptor: shadow table first, then the hardware
	 * registers (ICPLBs must be disabled while they are rewritten). */
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
  233. static noinline int dcplb_protection_fault(unsigned int cpu)
  234. {
  235. int status = bfin_read_DCPLB_STATUS();
  236. nr_dcplb_prot[cpu]++;
  237. if (status & FAULT_RW) {
  238. int idx = faulting_cplb_index(status);
  239. unsigned long data = dcplb_tbl[cpu][idx].data;
  240. if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
  241. write_permitted(status, data)) {
  242. data |= CPLB_DIRTY;
  243. dcplb_tbl[cpu][idx].data = data;
  244. bfin_write32(DCPLB_DATA0 + idx * 4, data);
  245. return 0;
  246. }
  247. }
  248. return CPLB_PROT_VIOL;
  249. }
  250. int cplb_hdr(int seqstat, struct pt_regs *regs)
  251. {
  252. int cause = seqstat & 0x3f;
  253. unsigned int cpu = raw_smp_processor_id();
  254. switch (cause) {
  255. case 0x23:
  256. return dcplb_protection_fault(cpu);
  257. case 0x2C:
  258. return icplb_miss(cpu);
  259. case 0x26:
  260. return dcplb_miss(cpu);
  261. default:
  262. return 1;
  263. }
  264. }
  265. void flush_switched_cplbs(unsigned int cpu)
  266. {
  267. int i;
  268. unsigned long flags;
  269. nr_cplb_flush[cpu]++;
  270. local_irq_save_hw(flags);
  271. _disable_icplb();
  272. for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
  273. icplb_tbl[cpu][i].data = 0;
  274. bfin_write32(ICPLB_DATA0 + i * 4, 0);
  275. }
  276. _enable_icplb();
  277. _disable_dcplb();
  278. for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
  279. dcplb_tbl[cpu][i].data = 0;
  280. bfin_write32(DCPLB_DATA0 + i * 4, 0);
  281. }
  282. _enable_dcplb();
  283. local_irq_restore_hw(flags);
  284. }
/*
 * Record the current process's page-permission bitmaps in
 * current_rwx_mask[cpu] and install DCPLBs covering the bitmap pages
 * themselves in the reserved slots first_mask_dcplb..first_switched_dcplb-1
 * (presumably so the miss handlers above can dereference the bitmaps
 * without faulting — NOTE(review): confirm against the CPLB init code).
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	/* A NULL mask only clears the pointer; nothing to map. */
	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* Bitmaps live in L2 SRAM: use the fixed L2 descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		/* Otherwise a valid, dirty, supervisor-writable 4K page,
		 * cacheable when external memory caching is configured. */
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	/* Fill the reserved mask slots with consecutive pages, updating
	 * shadow table and hardware together (DCPLBs disabled meanwhile). */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();

	local_irq_restore_hw(flags);
}