cplbmgr.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371
  1. /*
  2. * Blackfin CPLB exception handling for when MPU in on
  3. *
  4. * Copyright 2008-2009 Analog Devices Inc.
  5. *
  6. * Licensed under the GPL-2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <asm/blackfin.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/cplb.h>
  13. #include <asm/cplbinit.h>
  14. #include <asm/mmu_context.h>
  15. /*
  16. * WARNING
  17. *
  18. * This file is compiled with certain -ffixed-reg options. We have to
  19. * make sure not to call any functions here that could clobber these
  20. * registers.
  21. */
/* Size/order of the per-process rwx page bitmaps (initialized elsewhere —
 * not visible in this file). page_mask_nelts is used below as the word
 * stride between the read, write, and execute sections of the bitmap. */
int page_mask_nelts;
int page_mask_order;
/* rwx bitmap of the process currently running on each CPU; NULL when no
 * mask is installed (see set_mask_dcplbs()). */
unsigned long *current_rwx_mask[NR_CPUS];
/* Per-CPU event counters for the CPLB miss/protection handlers below. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 *
 * The Blackfin NORM builtin returns the number of redundant sign bits,
 * so for a value with a single set bit in the low 16 bits, 30 - norm is
 * the position of that bit.  NOTE(review): assumes the hardware sets
 * exactly one of the low 16 status bits -- confirm against the HRM.
 */
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}
  37. /*
  38. * Given the contents of the status register and the DCPLB_DATA contents,
  39. * return true if a write access should be permitted.
  40. */
  41. static inline int write_permitted(int status, unsigned long data)
  42. {
  43. if (status & FAULT_USERSUPV)
  44. return !!(data & CPLB_SUPV_WR);
  45. else
  46. return !!(data & CPLB_USER_WR);
  47. }
/* Per-CPU counters implementing round-robin replacement of the switched
 * CPLB entries (see evict_one_icplb()/evict_one_dcplb()). */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  50. /*
  51. * Find an ICPLB entry to be evicted and return its index.
  52. */
  53. static int evict_one_icplb(unsigned int cpu)
  54. {
  55. int i;
  56. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  57. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  58. return i;
  59. i = first_switched_icplb + icplb_rr_index[cpu];
  60. if (i >= MAX_CPLBS) {
  61. i -= MAX_CPLBS - first_switched_icplb;
  62. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  63. }
  64. icplb_rr_index[cpu]++;
  65. return i;
  66. }
  67. static int evict_one_dcplb(unsigned int cpu)
  68. {
  69. int i;
  70. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  71. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  72. return i;
  73. i = first_switched_dcplb + dcplb_rr_index[cpu];
  74. if (i >= MAX_CPLBS) {
  75. i -= MAX_CPLBS - first_switched_dcplb;
  76. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  77. }
  78. dcplb_rr_index[cpu]++;
  79. return i;
  80. }
/*
 * Handle a DCPLB miss: build a descriptor for the faulting data address
 * and install it, evicting a switched entry if necessary.  Returns 0 on
 * success, CPLB_PROT_VIOL when the access must be reported as a
 * protection violation.
 *
 * NOTE(review): runs in exception context under the -ffixed-reg
 * constraints described in the file header -- do not add function calls
 * here that could clobber the reserved registers.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default descriptor: 4KB page, supervisor-writable, dirty. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM gets one fixed descriptor covering the region. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async memory banks: user readability comes from the
			 * read section (first page_mask_nelts words) of the
			 * current process's rwx bitmap. */
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;	/* bitmap word; shadows outer idx */
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Supervisor read of the boot ROM: use one 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Between _ramend and physical_mem_end (reserved memory):
		 * grant user read/write unconditionally. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		/* Normal RAM: read bits are in the first section of the
		 * bitmap, write bits page_mask_nelts words further on. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;	/* bitmap word; shadows outer idx */
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	/* Install the descriptor in the shadow table and the hardware,
	 * with DCPLBs disabled around the MMR writes. */
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}
/*
 * Handle an ICPLB miss: build a descriptor for the faulting instruction
 * address and install it, evicting a switched entry if necessary.
 * Returns 0 on success, CPLB_PROT_VIOL when the fetch is not permitted.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Already covered: map the following page. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM gets one fixed descriptor covering the region. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;	/* bitmap word; shadows outer idx */
					int bit = 1 << (page & 31);

					/* Execute bits live two sections
					 * (2 * page_mask_nelts words) in. */
					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & FAULT_USERSUPV)) {
			/* Supervisor fetch from boot ROM: use one 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;	/* bitmap word; shadows outer idx */
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	/* Install the descriptor in the shadow table and the hardware,
	 * with ICPLBs disabled around the MMR writes. */
	idx = evict_one_icplb(cpu);

	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
/*
 * Handle a DCPLB protection violation.  The only case fixed up here is
 * the first write to a page not yet marked dirty: if the descriptor
 * permits the write (and is not write-through), set CPLB_DIRTY and
 * resume.  Anything else is a genuine violation (CPLB_PROT_VIOL).
 */
static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

	if (status & FAULT_RW) {	/* fault was a write access */
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;

		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			/* Mark dirty in both the shadow table and the HW. */
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}
  254. int cplb_hdr(int seqstat, struct pt_regs *regs)
  255. {
  256. int cause = seqstat & 0x3f;
  257. unsigned int cpu = raw_smp_processor_id();
  258. switch (cause) {
  259. case 0x23:
  260. return dcplb_protection_fault(cpu);
  261. case 0x2C:
  262. return icplb_miss(cpu);
  263. case 0x26:
  264. return dcplb_miss(cpu);
  265. default:
  266. return 1;
  267. }
  268. }
/*
 * Invalidate every switched (per-process) ICPLB and DCPLB entry on this
 * CPU, clearing both the shadow tables and the hardware registers.  The
 * whole sequence runs with hardware interrupts disabled and each CPLB
 * side disabled around its MMR writes.
 */
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

	local_irq_save_hw(flags);
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();

	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}
/*
 * Install @masks as the rwx page bitmap for @cpu: publish it in
 * current_rwx_mask[] and point the "mask" DCPLB entries (indices
 * [first_mask_dcplb, first_switched_dcplb)) at consecutive pages of the
 * bitmap so the miss handlers can read it without faulting.  A NULL
 * @masks only clears the current mask pointer.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* Bitmap lives in L2 SRAM: use the fixed L2 descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		/* Otherwise: supervisor-writable dirty 4KB pages, cache
		 * attributes per kernel config. */
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();

	local_irq_restore_hw(flags);
}