/* cplbmgr.c */
  1. /*
  2. * Blackfin CPLB exception handling.
  3. * Copyright 2004-2007 Analog Devices Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, see the file COPYING, or write
  17. * to the Free Software Foundation, Inc.,
  18. * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <linux/module.h>
  21. #include <linux/mm.h>
  22. #include <asm/blackfin.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/cplbinit.h>
  25. #include <asm/mmu_context.h>
/* Decoded bits of the DCPLB_STATUS / ICPLB_STATUS fault registers. */
#define FAULT_RW (1 << 16)	/* faulting access was a write */
#define FAULT_USERSUPV (1 << 17)	/* fault occurred in supervisor mode */

/* Size/order of the per-process R/W/X page-permission bitmaps
 * (initialized elsewhere -- not visible in this file). */
int page_mask_nelts;
int page_mask_order;

/* Per-CPU pointer to the current process's R/W/X permission bitmaps;
 * NULL when no mask is installed (see set_mask_dcplbs()). */
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU event counters updated by the handlers below; the consumers
 * are elsewhere (presumably exported for statistics -- not visible here). */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
  34. static inline void disable_dcplb(void)
  35. {
  36. unsigned long ctrl;
  37. SSYNC();
  38. ctrl = bfin_read_DMEM_CONTROL();
  39. ctrl &= ~ENDCPLB;
  40. bfin_write_DMEM_CONTROL(ctrl);
  41. SSYNC();
  42. }
  43. static inline void enable_dcplb(void)
  44. {
  45. unsigned long ctrl;
  46. SSYNC();
  47. ctrl = bfin_read_DMEM_CONTROL();
  48. ctrl |= ENDCPLB;
  49. bfin_write_DMEM_CONTROL(ctrl);
  50. SSYNC();
  51. }
  52. static inline void disable_icplb(void)
  53. {
  54. unsigned long ctrl;
  55. SSYNC();
  56. ctrl = bfin_read_IMEM_CONTROL();
  57. ctrl &= ~ENICPLB;
  58. bfin_write_IMEM_CONTROL(ctrl);
  59. SSYNC();
  60. }
  61. static inline void enable_icplb(void)
  62. {
  63. unsigned long ctrl;
  64. SSYNC();
  65. ctrl = bfin_read_IMEM_CONTROL();
  66. ctrl |= ENICPLB;
  67. bfin_write_IMEM_CONTROL(ctrl);
  68. SSYNC();
  69. }
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	/* The low 16 status bits are a one-hot mask identifying the faulting
	 * CPLB.  The Blackfin norm builtin returns the number of redundant
	 * sign bits of its argument, so 30 - signbits recovers the position
	 * of the single set bit, i.e. the CPLB index. */
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}
  79. /*
  80. * Given the contents of the status register and the DCPLB_DATA contents,
  81. * return true if a write access should be permitted.
  82. */
  83. static inline int write_permitted(int status, unsigned long data)
  84. {
  85. if (status & FAULT_USERSUPV)
  86. return !!(data & CPLB_SUPV_WR);
  87. else
  88. return !!(data & CPLB_USER_WR);
  89. }
  90. /* Counters to implement round-robin replacement. */
  91. static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  92. /*
  93. * Find an ICPLB entry to be evicted and return its index.
  94. */
  95. static int evict_one_icplb(unsigned int cpu)
  96. {
  97. int i;
  98. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  99. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  100. return i;
  101. i = first_switched_icplb + icplb_rr_index[cpu];
  102. if (i >= MAX_CPLBS) {
  103. i -= MAX_CPLBS - first_switched_icplb;
  104. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  105. }
  106. icplb_rr_index[cpu]++;
  107. return i;
  108. }
  109. static int evict_one_dcplb(unsigned int cpu)
  110. {
  111. int i;
  112. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  113. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  114. return i;
  115. i = first_switched_dcplb + dcplb_rr_index[cpu];
  116. if (i >= MAX_CPLBS) {
  117. i -= MAX_CPLBS - first_switched_dcplb;
  118. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  119. }
  120. dcplb_rr_index[cpu]++;
  121. return i;
  122. }
/*
 * Handle a DCPLB miss: construct a CPLB descriptor covering the faulting
 * address and install it into a (possibly evicted) switched DCPLB slot.
 * Returns 0 on success, or CPLB_PROT_VIOL if the access is not allowed.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default descriptor: valid, dirty, supervisor-writable 4K page. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
	if (bfin_addr_dcachable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#ifdef CONFIG_BFIN_WT
		/* Write-through mode: allocate on write, write-through. */
		d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
	}
#endif
	if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
		    && (status & FAULT_USERSUPV)) {
			/* Async memory banks, supervisor access only:
			 * map as a single 4MB page. */
			addr &= ~0x3fffff;
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_4MB;
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Boot ROM, supervisor *read* only (FAULT_RW clear):
			 * map as a single 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory between _ramend and physical_mem_end:
		 * grant full user read/write access. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		/* Normal RAM: derive user permissions from the current
		 * process's page bitmaps (R bitmap first, then the W
		 * bitmap page_mask_nelts words later). */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;	/* bitmap word: 32 pages per word */
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;	/* advance to the W bitmap */
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	/* CPLBs must be disabled while the MMR pair is rewritten. */
	disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	enable_dcplb();

	return 0;
}
/*
 * Handle an ICPLB miss: construct a CPLB descriptor covering the faulting
 * address and install it into a (possibly evicted) switched ICPLB slot.
 * Returns 0 on success, or CPLB_PROT_VIOL if execution is not allowed.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Covered already: map the *next* page so the
				 * straddling instruction can complete. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_ICACHE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (addr >= physical_mem_end) {
		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Boot ROM, supervisor only: map as one 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory: user-readable (executable). */
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];
			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;	/* bitmap word: 32 pages per word */
				int bit = 1 << (page & 31);

				/* Skip past the R and W bitmaps to reach X. */
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);

	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	/* CPLBs must be disabled while the MMR pair is rewritten. */
	disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	enable_icplb();

	return 0;
}
  253. static noinline int dcplb_protection_fault(unsigned int cpu)
  254. {
  255. int status = bfin_read_DCPLB_STATUS();
  256. nr_dcplb_prot[cpu]++;
  257. if (status & FAULT_RW) {
  258. int idx = faulting_cplb_index(status);
  259. unsigned long data = dcplb_tbl[cpu][idx].data;
  260. if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
  261. write_permitted(status, data)) {
  262. data |= CPLB_DIRTY;
  263. dcplb_tbl[cpu][idx].data = data;
  264. bfin_write32(DCPLB_DATA0 + idx * 4, data);
  265. return 0;
  266. }
  267. }
  268. return CPLB_PROT_VIOL;
  269. }
  270. int cplb_hdr(int seqstat, struct pt_regs *regs)
  271. {
  272. int cause = seqstat & 0x3f;
  273. unsigned int cpu = smp_processor_id();
  274. switch (cause) {
  275. case 0x23:
  276. return dcplb_protection_fault(cpu);
  277. case 0x2C:
  278. return icplb_miss(cpu);
  279. case 0x26:
  280. return dcplb_miss(cpu);
  281. default:
  282. return 1;
  283. }
  284. }
  285. void flush_switched_cplbs(unsigned int cpu)
  286. {
  287. int i;
  288. unsigned long flags;
  289. nr_cplb_flush[cpu]++;
  290. local_irq_save(flags);
  291. disable_icplb();
  292. for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
  293. icplb_tbl[cpu][i].data = 0;
  294. bfin_write32(ICPLB_DATA0 + i * 4, 0);
  295. }
  296. enable_icplb();
  297. disable_dcplb();
  298. for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
  299. dcplb_tbl[cpu][i].data = 0;
  300. bfin_write32(DCPLB_DATA0 + i * 4, 0);
  301. }
  302. enable_dcplb();
  303. local_irq_restore(flags);
  304. }
/*
 * Install DCPLB entries that map the process's R/W/X permission bitmaps
 * themselves, so the miss handlers above can read them without faulting.
 * A NULL masks pointer simply clears the per-CPU mask pointer.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save(flags);
	current_rwx_mask[cpu] = masks;

	/* Valid, dirty, supervisor-writable 4K pages; cacheable when the
	 * data cache is configured in. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
	d_data |= CPLB_L1_CHBL;
#ifdef CONFIG_BFIN_WT
	d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif

	disable_dcplb();
	/* One 4K page per reserved mask slot, covering the bitmaps
	 * contiguously from 'masks' upward. */
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	enable_dcplb();

	local_irq_restore(flags);
}