  1. /*
* Blackfin CPLB exception handling for when MPU is on
  3. *
  4. * Copyright 2008-2009 Analog Devices Inc.
  5. *
  6. * Licensed under the GPL-2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <asm/blackfin.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/cplb.h>
  13. #include <asm/cplbinit.h>
  14. #include <asm/mmu_context.h>
  15. /*
  16. * WARNING
  17. *
  18. * This file is compiled with certain -ffixed-reg options. We have to
  19. * make sure not to call any functions here that could clobber these
  20. * registers.
  21. */
/* Number of 32-bit words in one per-process page-permission bitmap,
 * and its allocation order (initialized elsewhere — not in this file). */
int page_mask_nelts;
int page_mask_order;
/* Per-CPU pointer to the rwx permission bitmaps of the current process;
 * NULL when no bitmaps are installed (see set_mask_dcplbs()). */
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU event counters: data/instruction CPLB misses, supervisor-mode
 * instruction misses, data protection faults, and full CPLB flushes. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
  28. /*
  29. * Given the contents of the status register, return the index of the
  30. * CPLB that caused the fault.
  31. */
  32. static inline int faulting_cplb_index(int status)
  33. {
  34. int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
  35. return 30 - signbits;
  36. }
  37. /*
  38. * Given the contents of the status register and the DCPLB_DATA contents,
  39. * return true if a write access should be permitted.
  40. */
  41. static inline int write_permitted(int status, unsigned long data)
  42. {
  43. if (status & FAULT_USERSUPV)
  44. return !!(data & CPLB_SUPV_WR);
  45. else
  46. return !!(data & CPLB_USER_WR);
  47. }
  48. /* Counters to implement round-robin replacement. */
  49. static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  50. /*
  51. * Find an ICPLB entry to be evicted and return its index.
  52. */
  53. static int evict_one_icplb(unsigned int cpu)
  54. {
  55. int i;
  56. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  57. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  58. return i;
  59. i = first_switched_icplb + icplb_rr_index[cpu];
  60. if (i >= MAX_CPLBS) {
  61. i -= MAX_CPLBS - first_switched_icplb;
  62. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  63. }
  64. icplb_rr_index[cpu]++;
  65. return i;
  66. }
  67. static int evict_one_dcplb(unsigned int cpu)
  68. {
  69. int i;
  70. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  71. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  72. return i;
  73. i = first_switched_dcplb + dcplb_rr_index[cpu];
  74. if (i >= MAX_CPLBS) {
  75. i -= MAX_CPLBS - first_switched_dcplb;
  76. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  77. }
  78. dcplb_rr_index[cpu]++;
  79. return i;
  80. }
/*
 * Handle a DCPLB miss: build a CPLB entry covering the faulting data
 * address and install it in a table slot chosen by evict_one_dcplb().
 * Returns 0 on success, CPLB_PROT_VIOL if the access is not allowed.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default attributes: supervisor-writable, valid, dirty, 4KB page. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		/* Cacheable external memory (plus the anomaly workaround bit). */
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM: replace attributes with the fixed L2 data settings. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
		    && (status & FAULT_USERSUPV)) {
			/* Async memory banks, supervisor access only:
			 * map as a 4MB page. */
			addr &= ~0x3fffff;
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_4MB;
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Boot ROM, supervisor reads only: map as a 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Between _ramend and physical_mem_end (reserved area):
		 * grant user read and write. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		/* Normal RAM: derive user read/write permission for this
		 * page from the current process's permission bitmaps. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);
			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;
			/* The write bitmap sits page_mask_nelts words
			 * after the read bitmap. */
			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	/* Record the entry in the shadow table, then rewrite the
	 * hardware CPLB registers with the DCPLBs disabled. */
	idx = evict_one_dcplb(cpu);
	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;
	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();
	return 0;
}
/*
 * Handle an ICPLB miss: build a CPLB entry covering the faulting
 * instruction address and install it in a slot chosen by
 * evict_one_icplb().  Returns 0 on success, CPLB_PROT_VIOL if the
 * fetch is not allowed.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Map the following page instead, since the
				 * current one is already covered. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	/* Default attributes: valid, port-priority, 4KB page. */
	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM: replace attributes with the fixed L2
		 * instruction settings. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Boot ROM, supervisor fetches only: map as a 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory area: user-readable. */
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];
			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);
				/* The execute bitmap is the third one,
				 * 2 * page_mask_nelts words in. */
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	/* Record the entry in the shadow table, then rewrite the
	 * hardware CPLB registers with the ICPLBs disabled. */
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;
	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();
	return 0;
}
/*
 * Handle a DCPLB protection violation.  The one case repaired here is
 * the first write to a clean, writable page: the dirty bit is set in
 * the faulting entry and the access is retried.  Anything else is a
 * genuine violation and returns CPLB_PROT_VIOL.
 */
static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;
	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;
		/* Not write-through, not yet dirty, and the faulting mode
		 * is allowed to write: emulate the dirty bit. */
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			/* Update both the shadow table and the hardware entry. */
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}
  234. int cplb_hdr(int seqstat, struct pt_regs *regs)
  235. {
  236. int cause = seqstat & 0x3f;
  237. unsigned int cpu = raw_smp_processor_id();
  238. switch (cause) {
  239. case 0x23:
  240. return dcplb_protection_fault(cpu);
  241. case 0x2C:
  242. return icplb_miss(cpu);
  243. case 0x26:
  244. return dcplb_miss(cpu);
  245. default:
  246. return 1;
  247. }
  248. }
/*
 * Invalidate all switched (per-process) ICPLB and DCPLB entries on this
 * CPU, in both the shadow tables and the hardware registers.  Hardware
 * interrupts are disabled for the duration so no CPLB exception can be
 * taken while the entries are being cleared.
 */
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;
	local_irq_save_hw(flags);
	/* Clear the switched instruction entries with ICPLBs disabled. */
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();
	/* Likewise for the switched data entries. */
	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}
/*
 * Install CPLB entries covering the given per-process permission
 * bitmaps (one 4KB page per reserved mask-DCPLB slot), and record the
 * bitmap pointer in current_rwx_mask[cpu].  A NULL masks argument just
 * clears the per-CPU pointer.  Presumably this keeps the bitmaps
 * mapped so the miss handlers can read them — confirm against callers.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	/* IRQs off: the mask entries must not be inconsistent while a
	 * CPLB exception could be taken. */
	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* Bitmaps in L2 SRAM: use the fixed L2 data attributes. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		/* Supervisor-writable, valid, dirty 4KB pages; cacheable
		 * when external memory data caching is configured. */
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}
	/* Map consecutive bitmap pages into the reserved mask slots. */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}