/* cplbinit.c */
/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>

/*
 * Per-CPU CPLB entry tables for the instruction and data sides,
 * filled in by generate_cplb_tables_cpu() below.
 * NOTE(review): PDT_ATTR is a placement attribute from the asm headers —
 * confirm its section semantics against asm/cplbinit.h.
 */
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

/*
 * Index of the first table slot after the fixed boot-time entries;
 * set by generate_cplb_tables_cpu() once the static mappings are in.
 */
int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

/*
 * Address-range boundary tables describing the memory map, filled in
 * by generate_cplb_tables_all(); each entry pairs an end address with
 * the CPLB data bits to use below it.
 */
struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

/* Number of valid entries in icplb_bounds[] / dcplb_bounds[]. */
int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;
  22. void __init generate_cplb_tables_cpu(unsigned int cpu)
  23. {
  24. int i_d, i_i;
  25. unsigned long addr;
  26. struct cplb_entry *d_tbl = dcplb_tbl[cpu];
  27. struct cplb_entry *i_tbl = icplb_tbl[cpu];
  28. printk(KERN_INFO "NOMPU: setting up cplb tables\n");
  29. i_d = i_i = 0;
  30. #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
  31. /* Set up the zero page. */
  32. d_tbl[i_d].addr = 0;
  33. d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
  34. i_tbl[i_i].addr = 0;
  35. i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
  36. #endif
  37. /* Cover kernel memory with 4M pages. */
  38. addr = 0;
  39. for (; addr < memory_start; addr += 4 * 1024 * 1024) {
  40. d_tbl[i_d].addr = addr;
  41. d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
  42. i_tbl[i_i].addr = addr;
  43. i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
  44. }
  45. #ifdef CONFIG_ROMKERNEL
  46. /* Cover kernel XIP flash area */
  47. #ifdef CONFIG_BF60x
  48. addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
  49. d_tbl[i_d].addr = addr;
  50. d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
  51. i_tbl[i_i].addr = addr;
  52. i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
  53. #else
  54. addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
  55. d_tbl[i_d].addr = addr;
  56. d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
  57. i_tbl[i_i].addr = addr;
  58. i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
  59. #endif
  60. #endif
  61. /* Cover L1 memory. One 4M area for code and data each is enough. */
  62. if (cpu == 0) {
  63. if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
  64. d_tbl[i_d].addr = L1_DATA_A_START;
  65. d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
  66. }
  67. i_tbl[i_i].addr = L1_CODE_START;
  68. i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
  69. }
  70. #ifdef CONFIG_SMP
  71. else {
  72. if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
  73. d_tbl[i_d].addr = COREB_L1_DATA_A_START;
  74. d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
  75. }
  76. i_tbl[i_i].addr = COREB_L1_CODE_START;
  77. i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
  78. }
  79. #endif
  80. first_switched_dcplb = i_d;
  81. first_switched_icplb = i_i;
  82. BUG_ON(first_switched_dcplb > MAX_CPLBS);
  83. BUG_ON(first_switched_icplb > MAX_CPLBS);
  84. while (i_d < MAX_CPLBS)
  85. d_tbl[i_d++].data = 0;
  86. while (i_i < MAX_CPLBS)
  87. i_tbl[i_i++].data = 0;
  88. }
/*
 * Build the global address-range boundary tables (dcplb_bounds[] /
 * icplb_bounds[]) describing the whole memory map: cached SDRAM, the
 * DMA-uncached tail, reserved memory, the async banks, BootROM, and
 * (if present) L2 SRAM.  Entries are emitted in ascending end-address
 * order; a data value of 0 marks an unmapped hole.  The final counts
 * are stored in {d,i}cplb_nr_bounds.
 */
void __init generate_cplb_tables_all(void)
{
	unsigned long uncached_end;
	int i_d, i_i;

	i_d = 0;
	/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
	uncached_end = memory_mtd_start + mtd_size;
#else
	uncached_end = memory_end;
#endif
	/*
	 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
	 * so that we don't have to use 4kB pages and cause CPLB thrashing
	 */
	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
		dcplb_bounds[i_d].eaddr = uncached_end;
	else
		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	/* DMA uncached region. */
	if (DMA_UNCACHED_REGION) {
		dcplb_bounds[i_d].eaddr = _ramend;
		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory. */
		dcplb_bounds[i_d].eaddr = physical_mem_end;
		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
	dcplb_bounds[i_d++].data = 0;
	/* ASYNC banks. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	dcplb_bounds[i_d++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
	dcplb_bounds[i_d++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START;
		dcplb_bounds[i_d++].data = 0;
		/* L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
		dcplb_bounds[i_d++].data = L2_DMEMORY;
	}
	dcplb_nr_bounds = i_d;
	/* Guard against overrunning the fixed 9-entry bounds array. */
	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));

	i_i = 0;
	/* Normal RAM, including MTD FS. */
	icplb_bounds[i_i].eaddr = uncached_end;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (_ramend != physical_mem_end) {
		/* DMA uncached region. */
		if (DMA_UNCACHED_REGION) {
			/* Normally this hole is caught by the async below. */
			icplb_bounds[i_i].eaddr = _ramend;
			icplb_bounds[i_i++].data = 0;
		}
		/* Reserved memory. */
		icplb_bounds[i_i].eaddr = physical_mem_end;
		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
	icplb_bounds[i_i++].data = 0;
	/* ASYNC banks. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	icplb_bounds[i_i++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
	icplb_bounds[i_i++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START;
		icplb_bounds[i_i++].data = 0;
		/* L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
		icplb_bounds[i_i++].data = L2_IMEMORY;
	}
	icplb_nr_bounds = i_i;
	/* Guard against overrunning the fixed 9-entry bounds array. */
	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}
  181. }