cplbmgr.c
/*
 * Blackfin CPLB exception handling for when MPU in on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */
/* NOTE(review): presumably page_mask_nelts is the length in words of one
 * rwx bitmap and page_mask_order its allocation order — confirm against
 * the code that sets them (not visible in this file). */
int page_mask_nelts;
int page_mask_order;
/* Per-CPU pointer to the rwx permission bitmaps of the current process;
 * read by the miss handlers below, set by set_mask_dcplbs(). */
unsigned long *current_rwx_mask[NR_CPUS];
/* Per-CPU event counters, incremented by the handlers in this file. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
  28. /*
  29. * Given the contents of the status register, return the index of the
  30. * CPLB that caused the fault.
  31. */
  32. static inline int faulting_cplb_index(int status)
  33. {
  34. int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
  35. return 30 - signbits;
  36. }
  37. /*
  38. * Given the contents of the status register and the DCPLB_DATA contents,
  39. * return true if a write access should be permitted.
  40. */
  41. static inline int write_permitted(int status, unsigned long data)
  42. {
  43. if (status & FAULT_USERSUPV)
  44. return !!(data & CPLB_SUPV_WR);
  45. else
  46. return !!(data & CPLB_USER_WR);
  47. }
/* Counters to implement round-robin replacement.  Offsets (relative to
 * first_switched_*cplb) of the next entry to evict, per CPU. */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  50. /*
  51. * Find an ICPLB entry to be evicted and return its index.
  52. */
  53. static int evict_one_icplb(unsigned int cpu)
  54. {
  55. int i;
  56. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  57. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  58. return i;
  59. i = first_switched_icplb + icplb_rr_index[cpu];
  60. if (i >= MAX_CPLBS) {
  61. i -= MAX_CPLBS - first_switched_icplb;
  62. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  63. }
  64. icplb_rr_index[cpu]++;
  65. return i;
  66. }
  67. static int evict_one_dcplb(unsigned int cpu)
  68. {
  69. int i;
  70. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  71. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  72. return i;
  73. i = first_switched_dcplb + dcplb_rr_index[cpu];
  74. if (i >= MAX_CPLBS) {
  75. i -= MAX_CPLBS - first_switched_dcplb;
  76. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  77. }
  78. dcplb_rr_index[cpu]++;
  79. return i;
  80. }
/*
 * Handle a DCPLB miss: build protection/cacheability bits for the
 * faulting data address, evict one switched DCPLB entry and install a
 * new mapping both in the shadow table and the hardware MMRs.
 * Returns 0 on success or CPLB_PROT_VIOL if the access is not allowed.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default: supervisor-writable, valid, dirty 4KB page. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM: one fixed mapping replaces the defaults. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async banks: 4MB page, user read/write allowed. */
			addr &= ~(4 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_4MB;
			d_data |= CPLB_USER_RD | CPLB_USER_WR;
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Boot ROM: supervisor reads only (FAULT_RW clear,
			 * FAULT_USERSUPV set); map a 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Between _ramend and physical_mem_end: user read/write. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		/* Normal RAM: derive user permissions from the current
		 * process' rwx bitmaps, one bit per page. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			/* First bitmap: read permission. */
			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;
			/* Second bitmap, page_mask_nelts words on: write. */
			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	/* Install the new entry in the shadow table and the hardware,
	 * with the DCPLBs disabled around the MMR writes. */
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}
/*
 * Handle an ICPLB miss: build protection/cacheability bits for the
 * faulting instruction address, evict one switched ICPLB entry and
 * install a new mapping both in the shadow table and the hardware MMRs.
 * Returns 0 on success or CPLB_PROT_VIOL if execution from this
 * address is not allowed.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* An entry already covers addr, so map
				 * the following page instead. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	/* Default: valid 4KB page. */
	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM: one fixed mapping replaces the defaults. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async banks: 4MB page, user read allowed. */
			addr &= ~(4 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_4MB;
			i_data |= CPLB_USER_RD;
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Boot ROM: supervisor-mode only; map a 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Between _ramend and physical_mem_end: user read. */
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];
			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				/* Third bitmap, 2*page_mask_nelts words
				 * on: execute permission. */
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	/* Install the new entry in the shadow table and the hardware,
	 * with the ICPLBs disabled around the MMR writes. */
	idx = evict_one_icplb(cpu);

	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
  222. static noinline int dcplb_protection_fault(unsigned int cpu)
  223. {
  224. int status = bfin_read_DCPLB_STATUS();
  225. nr_dcplb_prot[cpu]++;
  226. if (status & FAULT_RW) {
  227. int idx = faulting_cplb_index(status);
  228. unsigned long data = dcplb_tbl[cpu][idx].data;
  229. if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
  230. write_permitted(status, data)) {
  231. data |= CPLB_DIRTY;
  232. dcplb_tbl[cpu][idx].data = data;
  233. bfin_write32(DCPLB_DATA0 + idx * 4, data);
  234. return 0;
  235. }
  236. }
  237. return CPLB_PROT_VIOL;
  238. }
  239. int cplb_hdr(int seqstat, struct pt_regs *regs)
  240. {
  241. int cause = seqstat & 0x3f;
  242. unsigned int cpu = raw_smp_processor_id();
  243. switch (cause) {
  244. case 0x23:
  245. return dcplb_protection_fault(cpu);
  246. case 0x2C:
  247. return icplb_miss(cpu);
  248. case 0x26:
  249. return dcplb_miss(cpu);
  250. default:
  251. return 1;
  252. }
  253. }
  254. void flush_switched_cplbs(unsigned int cpu)
  255. {
  256. int i;
  257. unsigned long flags;
  258. nr_cplb_flush[cpu]++;
  259. local_irq_save_hw(flags);
  260. _disable_icplb();
  261. for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
  262. icplb_tbl[cpu][i].data = 0;
  263. bfin_write32(ICPLB_DATA0 + i * 4, 0);
  264. }
  265. _enable_icplb();
  266. _disable_dcplb();
  267. for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
  268. dcplb_tbl[cpu][i].data = 0;
  269. bfin_write32(DCPLB_DATA0 + i * 4, 0);
  270. }
  271. _enable_dcplb();
  272. local_irq_restore_hw(flags);
  273. }
/*
 * Record the given rwx bitmaps as current for this CPU and point the
 * DCPLB entries in [first_mask_dcplb, first_switched_dcplb) at them,
 * one page per entry (presumably so the miss handlers' bitmap reads do
 * not themselves miss — confirm against the callers).
 * A NULL masks pointer just clears current_rwx_mask for this CPU.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* Bitmaps in L2 SRAM: use the fixed L2 mapping. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		/* Supervisor-writable, valid, dirty 4KB pages;
		 * cacheability bits follow the kernel configuration. */
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	/* Program the mask entries, one page each, with the DCPLBs
	 * disabled around the MMR writes. */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();

	local_irq_restore_hw(flags);
}