/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
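/*
 * Each SRAM region is managed as two singly linked lists hanging off
 * static head nodes: a list of free pieces, kept sorted by start
 * address so neighboring pieces can be coalesced on free, and a list
 * of allocated pieces tagged with the owning pid.  The L1 heads are
 * per-CPU since every core has its own L1; the L2 heads below are
 * shared by all cores.
 */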
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif
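/*
 * Locking: one per-CPU lock for the scratchpad lists, one per-CPU lock
 * shared by both L1 data banks, one per-CPU lock for L1 instruction
 * SRAM, and a single global lock for L2.  All of them are taken with
 * irqsave by the entry points below.
 */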
static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		spin_lock_init(&per_cpu(l1sram_lock, cpu));

		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* one lock covers both data banks */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	spin_lock_init(&l2_sram_lock);
#endif
}
static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
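/*
 * Core first-fit allocator.  Rounds the request up to a 4-byte
 * multiple, walks the free list for the first piece that is large
 * enough, splits off the remainder if the piece is bigger than
 * needed, and moves the allocated piece onto the used list tagged
 * with the caller's pid.  The caller must hold the lock protecting
 * the two lists.
 */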
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size == 0 || !pfree_head || !pused_head)
		return NULL;

	/* Align the size */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* search for the first free piece that is big enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: unlink the whole piece */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* larger piece: split off the front; use GFP_ATOMIC since
		 * callers hold a spinlock with IRQs disabled */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used list, keeping it sorted */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* find the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
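/*
 * Return a piece to the free list.  The piece is looked up by start
 * address on the used list, then reinserted into the address-sorted
 * free list; if it is contiguous with the previous and/or next free
 * piece, the pieces are coalesced so the region does not fragment
 * permanently.  The caller must hold the list lock.
 */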
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* find the piece on the used list */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the address-sorted free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		/* merge with the preceding free piece */
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		/* merge with the following free piece */
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
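/*
 * Generic free: work out which SRAM region the address falls in and
 * hand it to the matching region-specific free routine.  Returns -1
 * if the address belongs to none of them.
 */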
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
	return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
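/*
 * Bank-agnostic L1 data allocation: try Data A first and fall back to
 * Data B if bank A is exhausted (or not present on this part).
 */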
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);

	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return addr;
}

/* L1 Scratchpad memory allocate function: grab the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();

	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep it in the L1 SRAM List (lsl) so that the
 * resources are tracked.  These are designed for userspace so that
 * when a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;

	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
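/*
 * Illustrative (hypothetical) use from process context: ask for L1
 * instruction SRAM first, fall back to L2 if L1 is full, and release
 * the block through the same tracked interface:
 *
 *	void *buf = sram_alloc_with_lsl(1024, L1_INST_SRAM | L2_SRAM);
 *	if (buf) {
 *		... use the on-chip buffer ...
 *		sram_free_with_lsl(buf);
 *	}
 */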
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep each line of output the same length. Currently, that is
 * 44 bytes (including newline).
 */
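/*
 * Each entry line is emitted with "%p-%p %10i %5i %-10s\n", so the
 * output looks roughly like this (addresses illustrative):
 *
 *	ff800000-ff800400       1024   123 ALLOCATED
 */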
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list, then the free list */
	pslot = pused_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
not_done:
	return len;
}
static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif