/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>
/* Per-CPU instruction/data CPLB tables filled by generate_cplb_tables_cpu(). */
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

/*
 * Index of the first table slot that the CPLB miss handler may evict and
 * replace at runtime; slots below these are locked in at boot.
 */
int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

/*
 * Ascending address-boundary maps of the whole address space, built by
 * generate_cplb_tables_all(); consulted on CPLB misses to pick page data.
 */
struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

/* Number of valid entries in each bounds array. */
int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;
  22. void __init generate_cplb_tables_cpu(unsigned int cpu)
  23. {
  24. int i_d, i_i;
  25. unsigned long addr;
  26. struct cplb_entry *d_tbl = dcplb_tbl[cpu];
  27. struct cplb_entry *i_tbl = icplb_tbl[cpu];
  28. printk(KERN_INFO "NOMPU: setting up cplb tables\n");
  29. i_d = i_i = 0;
  30. #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
  31. /* Set up the zero page. */
  32. d_tbl[i_d].addr = 0;
  33. d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
  34. i_tbl[i_i].addr = 0;
  35. i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
  36. #endif
  37. /* Cover kernel memory with 4M pages. */
  38. addr = 0;
  39. for (; addr < memory_start; addr += 4 * 1024 * 1024) {
  40. d_tbl[i_d].addr = addr;
  41. d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
  42. i_tbl[i_i].addr = addr;
  43. i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
  44. }
  45. #ifdef CONFIG_ROMKERNEL
  46. /* Cover kernel XIP flash area */
  47. addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
  48. d_tbl[i_d].addr = addr;
  49. d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
  50. i_tbl[i_i].addr = addr;
  51. i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
  52. #endif
  53. /* Cover L1 memory. One 4M area for code and data each is enough. */
  54. if (cpu == 0) {
  55. if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
  56. d_tbl[i_d].addr = L1_DATA_A_START;
  57. d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
  58. }
  59. i_tbl[i_i].addr = L1_CODE_START;
  60. i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
  61. }
  62. #ifdef CONFIG_SMP
  63. else {
  64. if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
  65. d_tbl[i_d].addr = COREB_L1_DATA_A_START;
  66. d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
  67. }
  68. i_tbl[i_i].addr = COREB_L1_CODE_START;
  69. i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
  70. }
  71. #endif
  72. first_switched_dcplb = i_d;
  73. first_switched_icplb = i_i;
  74. BUG_ON(first_switched_dcplb > MAX_CPLBS);
  75. BUG_ON(first_switched_icplb > MAX_CPLBS);
  76. while (i_d < MAX_CPLBS)
  77. d_tbl[i_d++].data = 0;
  78. while (i_i < MAX_CPLBS)
  79. i_tbl[i_i++].data = 0;
  80. }
  81. void __init generate_cplb_tables_all(void)
  82. {
  83. unsigned long uncached_end;
  84. int i_d, i_i;
  85. i_d = 0;
  86. /* Normal RAM, including MTD FS. */
  87. #ifdef CONFIG_MTD_UCLINUX
  88. uncached_end = memory_mtd_start + mtd_size;
  89. #else
  90. uncached_end = memory_end;
  91. #endif
  92. /*
  93. * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
  94. * so that we don't have to use 4kB pages and cause CPLB thrashing
  95. */
  96. if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
  97. ((_ramend - uncached_end) >= 1 * 1024 * 1024))
  98. dcplb_bounds[i_d].eaddr = uncached_end;
  99. else
  100. dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
  101. dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
  102. /* DMA uncached region. */
  103. if (DMA_UNCACHED_REGION) {
  104. dcplb_bounds[i_d].eaddr = _ramend;
  105. dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
  106. }
  107. if (_ramend != physical_mem_end) {
  108. /* Reserved memory. */
  109. dcplb_bounds[i_d].eaddr = physical_mem_end;
  110. dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
  111. SDRAM_DGENERIC : SDRAM_DNON_CHBL);
  112. }
  113. /* Addressing hole up to the async bank. */
  114. dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
  115. dcplb_bounds[i_d++].data = 0;
  116. /* ASYNC banks. */
  117. dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
  118. dcplb_bounds[i_d++].data = SDRAM_EBIU;
  119. /* Addressing hole up to BootROM. */
  120. dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
  121. dcplb_bounds[i_d++].data = 0;
  122. /* BootROM -- largest one should be less than 1 meg. */
  123. dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
  124. dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
  125. if (L2_LENGTH) {
  126. /* Addressing hole up to L2 SRAM. */
  127. dcplb_bounds[i_d].eaddr = L2_START;
  128. dcplb_bounds[i_d++].data = 0;
  129. /* L2 SRAM. */
  130. dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
  131. dcplb_bounds[i_d++].data = L2_DMEMORY;
  132. }
  133. dcplb_nr_bounds = i_d;
  134. BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
  135. i_i = 0;
  136. /* Normal RAM, including MTD FS. */
  137. icplb_bounds[i_i].eaddr = uncached_end;
  138. icplb_bounds[i_i++].data = SDRAM_IGENERIC;
  139. if (_ramend != physical_mem_end) {
  140. /* DMA uncached region. */
  141. if (DMA_UNCACHED_REGION) {
  142. /* Normally this hole is caught by the async below. */
  143. icplb_bounds[i_i].eaddr = _ramend;
  144. icplb_bounds[i_i++].data = 0;
  145. }
  146. /* Reserved memory. */
  147. icplb_bounds[i_i].eaddr = physical_mem_end;
  148. icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
  149. SDRAM_IGENERIC : SDRAM_INON_CHBL);
  150. }
  151. /* Addressing hole up to the async bank. */
  152. icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
  153. icplb_bounds[i_i++].data = 0;
  154. /* ASYNC banks. */
  155. icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
  156. icplb_bounds[i_i++].data = SDRAM_EBIU;
  157. /* Addressing hole up to BootROM. */
  158. icplb_bounds[i_i].eaddr = BOOT_ROM_START;
  159. icplb_bounds[i_i++].data = 0;
  160. /* BootROM -- largest one should be less than 1 meg. */
  161. icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
  162. icplb_bounds[i_i++].data = SDRAM_IGENERIC;
  163. if (L2_LENGTH) {
  164. /* Addressing hole up to L2 SRAM. */
  165. icplb_bounds[i_i].eaddr = L2_START;
  166. icplb_bounds[i_i++].data = 0;
  167. /* L2 SRAM. */
  168. icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
  169. icplb_bounds[i_i++].data = L2_IMEMORY;
  170. }
  171. icplb_nr_bounds = i_i;
  172. BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
  173. }