  1. /*
  2. * Procedures for interfacing to Open Firmware.
  3. *
  4. * Peter Bergner, IBM Corp. June 2001.
  5. * Copyright (C) 2001 Peter Bergner.
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the License, or (at your option) any later version.
  11. */
  12. #include <linux/config.h>
  13. #include <linux/kernel.h>
  14. #include <linux/init.h>
  15. #include <linux/bitops.h>
  16. #include <asm/types.h>
  17. #include <asm/page.h>
  18. #include <asm/prom.h>
  19. #include <asm/lmb.h>
  20. #include <asm/abs_addr.h>
struct lmb lmb;		/* The single global LMB descriptor: memory + reserved region tables. */

#undef DEBUG		/* Define DEBUG to enable lmb_dump_all() tracing output. */
  23. void lmb_dump_all(void)
  24. {
  25. #ifdef DEBUG
  26. unsigned long i;
  27. struct lmb *_lmb = &lmb;
  28. udbg_printf("lmb_dump_all:\n");
  29. udbg_printf(" memory.cnt = 0x%lx\n",
  30. _lmb->memory.cnt);
  31. udbg_printf(" memory.size = 0x%lx\n",
  32. _lmb->memory.size);
  33. for (i=0; i < _lmb->memory.cnt ;i++) {
  34. udbg_printf(" memory.region[0x%x].base = 0x%lx\n",
  35. i, _lmb->memory.region[i].base);
  36. udbg_printf(" .physbase = 0x%lx\n",
  37. _lmb->memory.region[i].physbase);
  38. udbg_printf(" .size = 0x%lx\n",
  39. _lmb->memory.region[i].size);
  40. }
  41. udbg_printf("\n reserved.cnt = 0x%lx\n",
  42. _lmb->reserved.cnt);
  43. udbg_printf(" reserved.size = 0x%lx\n",
  44. _lmb->reserved.size);
  45. for (i=0; i < _lmb->reserved.cnt ;i++) {
  46. udbg_printf(" reserved.region[0x%x].base = 0x%lx\n",
  47. i, _lmb->reserved.region[i].base);
  48. udbg_printf(" .physbase = 0x%lx\n",
  49. _lmb->reserved.region[i].physbase);
  50. udbg_printf(" .size = 0x%lx\n",
  51. _lmb->reserved.region[i].size);
  52. }
  53. #endif /* DEBUG */
  54. }
  55. static unsigned long __init
  56. lmb_addrs_overlap(unsigned long base1, unsigned long size1,
  57. unsigned long base2, unsigned long size2)
  58. {
  59. return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
  60. }
  61. static long __init
  62. lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
  63. unsigned long base2, unsigned long size2)
  64. {
  65. if (base2 == base1 + size1)
  66. return 1;
  67. else if (base1 == base2 + size2)
  68. return -1;
  69. return 0;
  70. }
  71. static long __init
  72. lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
  73. {
  74. unsigned long base1 = rgn->region[r1].base;
  75. unsigned long size1 = rgn->region[r1].size;
  76. unsigned long base2 = rgn->region[r2].base;
  77. unsigned long size2 = rgn->region[r2].size;
  78. return lmb_addrs_adjacent(base1, size1, base2, size2);
  79. }
  80. /* Assumption: base addr of region 1 < base addr of region 2 */
  81. static void __init
  82. lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
  83. {
  84. unsigned long i;
  85. rgn->region[r1].size += rgn->region[r2].size;
  86. for (i=r2; i < rgn->cnt-1; i++) {
  87. rgn->region[i].base = rgn->region[i+1].base;
  88. rgn->region[i].physbase = rgn->region[i+1].physbase;
  89. rgn->region[i].size = rgn->region[i+1].size;
  90. }
  91. rgn->cnt--;
  92. }
  93. /* This routine called with relocation disabled. */
  94. void __init
  95. lmb_init(void)
  96. {
  97. struct lmb *_lmb = &lmb;
  98. /* Create a dummy zero size LMB which will get coalesced away later.
  99. * This simplifies the lmb_add() code below...
  100. */
  101. _lmb->memory.region[0].base = 0;
  102. _lmb->memory.region[0].size = 0;
  103. _lmb->memory.cnt = 1;
  104. /* Ditto. */
  105. _lmb->reserved.region[0].base = 0;
  106. _lmb->reserved.region[0].size = 0;
  107. _lmb->reserved.cnt = 1;
  108. }
  109. /* This routine called with relocation disabled. */
  110. void __init
  111. lmb_analyze(void)
  112. {
  113. unsigned long i;
  114. unsigned long mem_size = 0;
  115. unsigned long size_mask = 0;
  116. struct lmb *_lmb = &lmb;
  117. #ifdef CONFIG_MSCHUNKS
  118. unsigned long physbase = 0;
  119. #endif
  120. for (i=0; i < _lmb->memory.cnt; i++) {
  121. unsigned long lmb_size;
  122. lmb_size = _lmb->memory.region[i].size;
  123. #ifdef CONFIG_MSCHUNKS
  124. _lmb->memory.region[i].physbase = physbase;
  125. physbase += lmb_size;
  126. #else
  127. _lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
  128. #endif
  129. mem_size += lmb_size;
  130. size_mask |= lmb_size;
  131. }
  132. _lmb->memory.size = mem_size;
  133. }
  134. /* This routine called with relocation disabled. */
  135. static long __init
  136. lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
  137. {
  138. unsigned long i, coalesced = 0;
  139. long adjacent;
  140. /* First try and coalesce this LMB with another. */
  141. for (i=0; i < rgn->cnt; i++) {
  142. unsigned long rgnbase = rgn->region[i].base;
  143. unsigned long rgnsize = rgn->region[i].size;
  144. adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
  145. if ( adjacent > 0 ) {
  146. rgn->region[i].base -= size;
  147. rgn->region[i].physbase -= size;
  148. rgn->region[i].size += size;
  149. coalesced++;
  150. break;
  151. }
  152. else if ( adjacent < 0 ) {
  153. rgn->region[i].size += size;
  154. coalesced++;
  155. break;
  156. }
  157. }
  158. if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
  159. lmb_coalesce_regions(rgn, i, i+1);
  160. coalesced++;
  161. }
  162. if ( coalesced ) {
  163. return coalesced;
  164. } else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
  165. return -1;
  166. }
  167. /* Couldn't coalesce the LMB, so add it to the sorted table. */
  168. for (i=rgn->cnt-1; i >= 0; i--) {
  169. if (base < rgn->region[i].base) {
  170. rgn->region[i+1].base = rgn->region[i].base;
  171. rgn->region[i+1].physbase = rgn->region[i].physbase;
  172. rgn->region[i+1].size = rgn->region[i].size;
  173. } else {
  174. rgn->region[i+1].base = base;
  175. rgn->region[i+1].physbase = lmb_abs_to_phys(base);
  176. rgn->region[i+1].size = size;
  177. break;
  178. }
  179. }
  180. rgn->cnt++;
  181. return 0;
  182. }
  183. /* This routine called with relocation disabled. */
  184. long __init
  185. lmb_add(unsigned long base, unsigned long size)
  186. {
  187. struct lmb *_lmb = &lmb;
  188. struct lmb_region *_rgn = &(_lmb->memory);
  189. /* On pSeries LPAR systems, the first LMB is our RMO region. */
  190. if ( base == 0 )
  191. _lmb->rmo_size = size;
  192. return lmb_add_region(_rgn, base, size);
  193. }
  194. long __init
  195. lmb_reserve(unsigned long base, unsigned long size)
  196. {
  197. struct lmb *_lmb = &lmb;
  198. struct lmb_region *_rgn = &(_lmb->reserved);
  199. return lmb_add_region(_rgn, base, size);
  200. }
  201. long __init
  202. lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
  203. {
  204. unsigned long i;
  205. for (i=0; i < rgn->cnt; i++) {
  206. unsigned long rgnbase = rgn->region[i].base;
  207. unsigned long rgnsize = rgn->region[i].size;
  208. if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
  209. break;
  210. }
  211. }
  212. return (i < rgn->cnt) ? i : -1;
  213. }
/* Allocate size bytes aligned to align with no upper address limit.
 * Thin wrapper around lmb_alloc_base(); returns 0 on failure.
 */
unsigned long __init
lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
/* Allocate size bytes, aligned to align, from the highest suitable
 * address at or below max_addr (LMB_ALLOC_ANYWHERE lifts the ceiling).
 * Returns the allocated base address, or 0 on failure.
 */
unsigned long __init
lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;
	struct lmb *_lmb = &lmb;
	struct lmb_region *_mem = &(_lmb->memory);
	struct lmb_region *_rsv = &(_lmb->reserved);

	/* Scan memory regions top-down so we allocate from high addresses. */
	for (i=_mem->cnt-1; i >= 0; i--) {
		unsigned long lmbbase = _mem->region[i].base;
		unsigned long lmbsize = _mem->region[i].size;

		if ( max_addr == LMB_ALLOC_ANYWHERE )
			base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
		else if ( lmbbase < max_addr )
			/* Clip the candidate to the max_addr ceiling. */
			base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
		else
			continue;	/* region lies entirely above max_addr */

		/* Slide below each reserved region the candidate collides
		 * with, until we fall out the bottom of this memory region. */
		while ( (lmbbase <= base) &&
			((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
			base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
		}

		/* Candidate still inside the region (and not address 0)? Done. */
		if ( (base != 0) && (lmbbase <= base) )
			break;
	}

	if ( i < 0 )
		return 0;	/* no region could satisfy the request */

	/* NOTE(review): return value ignored — if the reserved table is
	 * full the block is returned without being reserved; confirm
	 * callers can tolerate that. */
	lmb_add_region(_rsv, base, size);

	return base;
}
  248. unsigned long __init
  249. lmb_phys_mem_size(void)
  250. {
  251. struct lmb *_lmb = &lmb;
  252. #ifdef CONFIG_MSCHUNKS
  253. return _lmb->memory.size;
  254. #else
  255. struct lmb_region *_mem = &(_lmb->memory);
  256. unsigned long total = 0;
  257. int i;
  258. /* add all physical memory to the bootmem map */
  259. for (i=0; i < _mem->cnt; i++)
  260. total += _mem->region[i].size;
  261. return total;
  262. #endif /* CONFIG_MSCHUNKS */
  263. }
  264. unsigned long __init
  265. lmb_end_of_DRAM(void)
  266. {
  267. struct lmb *_lmb = &lmb;
  268. struct lmb_region *_mem = &(_lmb->memory);
  269. int idx = _mem->cnt - 1;
  270. #ifdef CONFIG_MSCHUNKS
  271. return (_mem->region[idx].physbase + _mem->region[idx].size);
  272. #else
  273. return (_mem->region[idx].base + _mem->region[idx].size);
  274. #endif /* CONFIG_MSCHUNKS */
  275. return 0;
  276. }
  277. unsigned long __init
  278. lmb_abs_to_phys(unsigned long aa)
  279. {
  280. unsigned long i, pa = aa;
  281. struct lmb *_lmb = &lmb;
  282. struct lmb_region *_mem = &(_lmb->memory);
  283. for (i=0; i < _mem->cnt; i++) {
  284. unsigned long lmbbase = _mem->region[i].base;
  285. unsigned long lmbsize = _mem->region[i].size;
  286. if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
  287. pa = _mem->region[i].physbase + (aa - lmbbase);
  288. break;
  289. }
  290. }
  291. return pa;
  292. }
  293. /*
  294. * Truncate the lmb list to memory_limit if it's set
  295. * You must call lmb_analyze() after this.
  296. */
  297. void __init lmb_enforce_memory_limit(void)
  298. {
  299. extern unsigned long memory_limit;
  300. unsigned long i, limit;
  301. struct lmb_region *mem = &(lmb.memory);
  302. if (! memory_limit)
  303. return;
  304. limit = memory_limit;
  305. for (i = 0; i < mem->cnt; i++) {
  306. if (limit > mem->region[i].size) {
  307. limit -= mem->region[i].size;
  308. continue;
  309. }
  310. mem->region[i].size = limit;
  311. mem->cnt = i + 1;
  312. break;
  313. }
  314. }