vram.c

/*
 * VRAM manager for OMAP
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*#define DEBUG*/

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/setup.h>

#include <plat/vram.h>

#ifdef DEBUG
#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
#else
#define DBG(format, ...)
#endif

/* postponed regions are used to temporarily store region information at boot
 * time when we cannot yet allocate the region list */
#define MAX_POSTPONED_REGIONS 10

static bool vram_initialized;
static int postponed_cnt;
static struct {
	unsigned long paddr;
	size_t size;
} postponed_regions[MAX_POSTPONED_REGIONS];
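
/*
 * A vram_region is one contiguous physical memory range managed as VRAM.
 * Each region keeps a list of vram_alloc entries, sorted by physical
 * address, describing the currently allocated ranges within it.
 */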
struct vram_alloc {
	struct list_head list;
	unsigned long paddr;
	unsigned pages;
};

struct vram_region {
	struct list_head list;
	struct list_head alloc_list;
	unsigned long paddr;
	unsigned pages;
};

static DEFINE_MUTEX(region_mutex);
static LIST_HEAD(region_list);

static struct vram_region *omap_vram_create_region(unsigned long paddr,
		unsigned pages)
{
	struct vram_region *rm;

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);

	if (rm) {
		INIT_LIST_HEAD(&rm->alloc_list);
		rm->paddr = paddr;
		rm->pages = pages;
	}

	return rm;
}

#if 0
static void omap_vram_free_region(struct vram_region *vr)
{
	list_del(&vr->list);
	kfree(vr);
}
#endif
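
/*
 * Record a new allocation of 'pages' pages at 'paddr' inside region 'vr',
 * keeping the region's alloc_list sorted by physical address.
 */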
static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
		unsigned long paddr, unsigned pages)
{
	struct vram_alloc *va;
	struct vram_alloc *new;

	new = kzalloc(sizeof(*va), GFP_KERNEL);
	if (!new)
		return NULL;

	new->paddr = paddr;
	new->pages = pages;

	list_for_each_entry(va, &vr->alloc_list, list) {
		if (va->paddr > new->paddr)
			break;
	}

	list_add_tail(&new->list, &va->list);

	return new;
}

static void omap_vram_free_allocation(struct vram_alloc *va)
{
	list_del(&va->list);
	kfree(va);
}
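
/*
 * Register a physical memory range as a VRAM region. Until the region
 * list can be used (i.e. before omap_vram_init() has run) the range is
 * stored in postponed_regions and added later by omap_vram_init().
 */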
int omap_vram_add_region(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	unsigned pages;

	if (vram_initialized) {
		DBG("adding region paddr %08lx size %d\n",
				paddr, size);

		size &= PAGE_MASK;
		pages = size >> PAGE_SHIFT;

		rm = omap_vram_create_region(paddr, pages);
		if (rm == NULL)
			return -ENOMEM;

		list_add(&rm->list, &region_list);
	} else {
		if (postponed_cnt == MAX_POSTPONED_REGIONS)
			return -ENOMEM;

		postponed_regions[postponed_cnt].paddr = paddr;
		postponed_regions[postponed_cnt].size = size;

		++postponed_cnt;
	}
	return 0;
}
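
/*
 * Free a previously allocated VRAM range. The range must correspond to an
 * existing allocation; otherwise -EINVAL is returned.
 */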
int omap_vram_free(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	unsigned start, end;

	DBG("free mem paddr %08lx size %d\n", paddr, size);

	size = PAGE_ALIGN(size);

	mutex_lock(&region_mutex);

	list_for_each_entry(rm, &region_list, list) {
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			/* the allocation must lie entirely within the freed range */
			start = alloc->paddr;
			end = alloc->paddr + (alloc->pages << PAGE_SHIFT);

			if (start >= paddr && end <= paddr + size)
				goto found;
		}
	}

	mutex_unlock(&region_mutex);
	return -EINVAL;

found:
	omap_vram_free_allocation(alloc);

	mutex_unlock(&region_mutex);
	return 0;
}
EXPORT_SYMBOL(omap_vram_free);
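
/*
 * Reserve the exact range [paddr, paddr + pages * PAGE_SIZE) if it lies
 * inside one region and does not overlap any existing allocation.
 * Called with region_mutex held.
 */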
static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	size_t size;

	size = pages << PAGE_SHIFT;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		start = rm->paddr;
		end = start + (rm->pages << PAGE_SHIFT) - 1;
		if (start > paddr || end < paddr + size - 1)
			continue;

		DBG("block ok, checking allocs\n");

		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr - 1;

			if (start <= paddr && end >= paddr + size - 1)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;

		if (!(start <= paddr && end >= paddr + size - 1))
			continue;
found:
		DBG("found area start %lx, end %lx\n", start, end);

		if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
			return -ENOMEM;

		return 0;
	}

	return -ENOMEM;
}

int omap_vram_reserve(unsigned long paddr, size_t size)
{
	unsigned pages;
	int r;

	DBG("reserve mem paddr %08lx size %d\n", paddr, size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_reserve(paddr, pages);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_reserve);
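
/*
 * First-fit allocation: walk each region's gaps (before, between and after
 * existing allocations) and take the first one large enough for 'pages'
 * pages. Called with region_mutex held.
 */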
static int _omap_vram_alloc(unsigned pages, unsigned long *paddr)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		start = rm->paddr;

		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr;

			if (end - start >= pages << PAGE_SHIFT)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		end = rm->paddr + (rm->pages << PAGE_SHIFT);
found:
		if (end - start < pages << PAGE_SHIFT)
			continue;

		DBG("found %lx, end %lx\n", start, end);

		alloc = omap_vram_create_allocation(rm, start, pages);
		if (alloc == NULL)
			return -ENOMEM;

		*paddr = start;

		return 0;
	}

	return -ENOMEM;
}

int omap_vram_alloc(size_t size, unsigned long *paddr)
{
	unsigned pages;
	int r;

	BUG_ON(!size);

	DBG("alloc mem size %d\n", size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_alloc(pages, paddr);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_alloc);
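
/*
 * Typical use by a display/framebuffer driver (hypothetical sketch, not
 * taken from an in-tree caller):
 *
 *	unsigned long paddr;
 *	void __iomem *vaddr;
 *
 *	if (omap_vram_alloc(SZ_2M, &paddr))
 *		return -ENOMEM;
 *	vaddr = ioremap_wc(paddr, SZ_2M);
 *	...
 *	iounmap(vaddr);
 *	omap_vram_free(paddr, SZ_2M);
 */

/* Report total, free, and largest contiguous free VRAM, in bytes. */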
void omap_vram_get_info(unsigned long *vram,
		unsigned long *free_vram,
		unsigned long *largest_free_block)
{
	struct vram_region *vr;
	struct vram_alloc *va;

	*vram = 0;
	*free_vram = 0;
	*largest_free_block = 0;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		unsigned free;
		unsigned long pa;

		pa = vr->paddr;
		*vram += vr->pages << PAGE_SHIFT;

		list_for_each_entry(va, &vr->alloc_list, list) {
			free = va->paddr - pa;
			*free_vram += free;
			if (free > *largest_free_block)
				*largest_free_block = free;
			pa = va->paddr + (va->pages << PAGE_SHIFT);
		}

		free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
		*free_vram += free;
		if (free > *largest_free_block)
			*largest_free_block = free;
	}

	mutex_unlock(&region_mutex);
}
EXPORT_SYMBOL(omap_vram_get_info);
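
/*
 * Debugfs interface: the "vram" file (typically /sys/kernel/debug/vram)
 * lists each region and, indented below it, its current allocations.
 */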
#if defined(CONFIG_DEBUG_FS)
static int vram_debug_show(struct seq_file *s, void *unused)
{
	struct vram_region *vr;
	struct vram_alloc *va;
	unsigned size;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		size = vr->pages << PAGE_SHIFT;
		seq_printf(s, "%08lx-%08lx (%d bytes)\n",
				vr->paddr, vr->paddr + size - 1,
				size);

		list_for_each_entry(va, &vr->alloc_list, list) {
			size = va->pages << PAGE_SHIFT;
			seq_printf(s, "    %08lx-%08lx (%d bytes)\n",
					va->paddr, va->paddr + size - 1,
					size);
		}
	}

	mutex_unlock(&region_mutex);

	return 0;
}

static int vram_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vram_debug_show, inode->i_private);
}

static const struct file_operations vram_debug_fops = {
	.open = vram_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init omap_vram_create_debugfs(void)
{
	struct dentry *d;

	d = debugfs_create_file("vram", S_IRUGO, NULL,
			NULL, &vram_debug_fops);
	if (IS_ERR(d))
		return PTR_ERR(d);

	return 0;
}
#endif
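
/*
 * Runs at arch_initcall time: marks the allocator as initialized and adds
 * any regions that were registered earlier and had to be postponed.
 */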
static __init int omap_vram_init(void)
{
	int i;

	vram_initialized = 1;

	for (i = 0; i < postponed_cnt; i++)
		omap_vram_add_region(postponed_regions[i].paddr,
				postponed_regions[i].size);

#ifdef CONFIG_DEBUG_FS
	if (omap_vram_create_debugfs())
		pr_err("VRAM: Failed to create debugfs file\n");
#endif

	return 0;
}

arch_initcall(omap_vram_init);

/* boottime vram alloc stuff */

/* set from board file */
static u32 omap_vram_sdram_start __initdata;
static u32 omap_vram_sdram_size __initdata;

/* set from kernel cmdline */
static u32 omap_vram_def_sdram_size __initdata;
static u32 omap_vram_def_sdram_start __initdata;
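
/*
 * Parse the "vram=<size>[,<physaddr>]" kernel command line option.
 * <size> accepts the usual memparse suffixes (e.g. "vram=4M"), and the
 * optional start address is parsed as a hexadecimal value.
 */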
static int __init omap_vram_early_vram(char *p)
{
	omap_vram_def_sdram_size = memparse(p, &p);
	if (*p == ',')
		omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16);
	return 0;
}
early_param("vram", omap_vram_early_vram);

/*
 * Called from map_io. We need to call this early enough so that we
 * can reserve the fixed SDRAM regions before the VM gets hold of them.
 */
void __init omap_vram_reserve_sdram_memblock(void)
{
	u32 paddr;
	u32 size = 0;

	/* cmdline arg overrides the board file definition */
	if (omap_vram_def_sdram_size) {
		size = omap_vram_def_sdram_size;
		paddr = omap_vram_def_sdram_start;
	}

	if (!size) {
		size = omap_vram_sdram_size;
		paddr = omap_vram_sdram_start;
	}

#ifdef CONFIG_OMAP2_VRAM_SIZE
	if (!size) {
		size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
		paddr = 0;
	}
#endif

	if (!size)
		return;

	size = ALIGN(size, SZ_2M);

	if (paddr) {
		if (paddr & ~PAGE_MASK) {
			pr_err("VRAM start address 0x%08x not page aligned\n",
					paddr);
			return;
		}

		if (!memblock_is_region_memory(paddr, size)) {
			pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n",
					paddr, paddr + size - 1);
			return;
		}

		if (memblock_is_region_reserved(paddr, size)) {
			pr_err("FB: failed to reserve VRAM - busy\n");
			return;
		}

		if (memblock_reserve(paddr, size) < 0) {
			pr_err("FB: failed to reserve VRAM - no memory\n");
			return;
		}
	} else {
		paddr = memblock_alloc(size, SZ_2M);
	}

	memblock_free(paddr, size);
	memblock_remove(paddr, size);

	omap_vram_add_region(paddr, size);

	pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
}
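
/*
 * Called from board files to set the amount (and optionally the fixed
 * start address) of SDRAM to use as VRAM; e.g. a hypothetical board
 * might call omap_vram_set_sdram_vram(SZ_4M, 0) from its reserve hook.
 */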
void __init omap_vram_set_sdram_vram(u32 size, u32 start)
{
	omap_vram_sdram_start = start;
	omap_vram_sdram_size = size;
}