/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif
#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");
#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->idle = current;
	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}
void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S,
	 * we know that everything about L1 text/data is nicely aligned,
	 * so copy in 4 byte chunks, and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction memory cause bad access errors.  So we
	 * are stuck: we are required to use DMA, but can't use the
	 * common DMA functions.  We can't use memcpy either, since it
	 * might itself end up in the relocated L1.
	 */
	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}
#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
				 text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
				 data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
				 data_b_l1_len);

	early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
		(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();
	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif
/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}
/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}
	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}
static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}
static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}
/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
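/*
 * For illustration (hypothetical values): booting with
 * "mem=32M memmap=1M$0x1000000" first caps the kernel's usable RAM at
 * 32MB via _ramend, then marks the 1MB region starting at 0x1000000 as
 * BFIN_MEMMAP_RESERVED so the bootmem allocator will skip it.
 */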
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *	[memory_mtd_start(memory_end),
 *		memory_mtd_start + mtd_size]:	rootfs (if any)
 *	[_ramend - DMA_UNCACHED_REGION,
 *		_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56MB if HUNT_FOR_ZERO is on.
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */

#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 bytes past the end of the kernel's image, and look at it.
	 * When no image is attached, mtd_size is set to a random number.
	 * Do some basic sanity checks before operating on things.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);

		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */

	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263.  If the anomaly
	 * doesn't exist, or we don't need the workaround, then don't.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
			     ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
	       );
}
/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}
static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				    bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
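/*
 * EBSZ_TO_MEG decodes a 4-bit bank-size field of EBIU_SDBCTL into
 * megabytes, e.g. a field value of 0x5 reads back as a 64MB bank.
 * Encodings not listed leave meg = 0, so an unpopulated bank simply
 * contributes nothing to the totals computed below.
 */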
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
		case DEVSZ_64:  ret = 64 / 8;  break;
		case DEVSZ_128: ret = 128 / 8; break;
		case DEVSZ_256: ret = 256 / 8; break;
		case DEVSZ_512: ret = 512 / 8; break;
	}
	switch (ddrctl & 0x30000) {
		/* fall through is intentional: each halving of the device
		 * width doubles the chip count and thus the total size */
		case DEVWD_4:  ret *= 2;
		case DEVWD_8:  ret *= 2;
		case DEVWD_16: break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}
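/*
 * A rough worked example of the DDR branch above (illustrative values):
 * a device reporting DEVSZ_128 with DEVWD_8 computes 128 / 8 = 16MB for
 * a 16-bit-wide part, then doubles once for the 8-bit device width,
 * giving 32MB total.
 */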
__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}
void __init setup_arch(char **cmdline_p)
{
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	/* If the user does not specify things on the command line, use
	 * what the bootloader set things up as
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERISIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERISIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERISIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERISIS((bfin_read_MISCPORT_HYSTERISIS() &
					~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif

	printk(KERN_INFO "Hardware Trace ");
	if (bfin_read_TBUFCTL() & 0x1)
		printk(KERN_CONT "Active ");
	else
		printk(KERN_CONT "Off ");
	if (bfin_read_TBUFCTL() & 0x2)
		printk(KERN_CONT "and Enabled\n");
	else
		printk(KERN_CONT "and Disabled\n");

	printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
	       FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
	       != SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
	       != ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
	       != ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
	       != ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
	       != ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
	       != ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
	       != ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
	       != ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
	       != SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}
static int __init topology_init(void)
{
	unsigned int cpu;
	/* Record CPU-private information for the boot processor. */
	bfin_setup_cpudata(0);

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);
/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}

static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);
/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}
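/*
 * The computation above amounts to VCO = (CLKIN / (1 + DF)) * MSEL,
 * with MSEL = 0 meaning a multiplier of 64.  For example (illustrative
 * values), CLKIN = 25MHz, DF = 0 and MSEL = 20 give a 500MHz VCO.
 */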
/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);
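/*
 * CCLK is normally VCO >> CSEL (a divide by 1, 2, 4 or 8).  The SSEL
 * comparison above covers configurations where the system divider
 * would otherwise make SCLK exceed CCLK; the core clock is then
 * reported as VCO / SSEL instead.  Continuing the illustrative example,
 * a 500MHz VCO with CSEL = 0 yields a 500MHz core clock.
 */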
/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);
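/*
 * SCLK = VCO / SSEL, where SSEL is the low nibble of PLL_DIV (1..15;
 * 0 is invalid and coerced to 1 above).  Continuing the illustrative
 * example, a 500MHz VCO with SSEL = 4 gives a 125MHz system clock.
 */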
unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
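/*
 * These two helpers convert between SCLK cycle counts and microseconds
 * using 64-bit intermediates to avoid overflow.  Roughly, at a 125MHz
 * system clock, usecs_to_sclk(8) = 125000000 * 8 / 1000000 = 1000
 * cycles, and sclk_to_usecs(1000) maps back to 8us.
 */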
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
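	/* The standard BogoMIPS formula follows: loops_per_jiffy * HZ /
	 * 500000, printed with two decimal places.
	 */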
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		   "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		   "cacheable (write-through)"
#else
		   "uncacheable"
#endif
		   " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]);
#endif

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			   "cacheable"
#else
			   "uncacheable"
#endif
			   " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			   "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			   "cacheable (write-through)"
#else
			   "uncacheable"
#endif
			   " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
		((int)memory_end - (int)_rambase) >> 10,
		(void *)_rambase,
		(void *)memory_end);
	seq_printf(m, "\n");

	return 0;
}
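/*
 * The helpers below wire show_cpuinfo() into the seq_file interface
 * behind /proc/cpuinfo: c_start() positions the iterator on the first
 * online CPU, c_next() advances through cpu_online_map, and c_stop()
 * has nothing to release.
 */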
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = first_cpu(cpu_online_map);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}