sram-alloc.c

/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
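
/*
 * Each SRAM region is managed as two singly-linked lists of sram_piece
 * nodes hanging off static dummy heads: one list of free pieces and one
 * of allocated ("used") pieces, both kept ordered by start address.  The
 * nodes themselves live in a kmem cache, not in the SRAM they describe.
 */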
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the per-cpu lock */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));

		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* A and B share one per-cpu lock, so initialize it once both lists exist */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the per-cpu lock */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock; L2 is shared by all CPUs, so it is not per-cpu */
	spin_lock_init(&l2_sram_lock);
#endif
}
static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
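
/*
 * pure_initcall() runs before every other initcall level, so the lists
 * above are populated before any driver initialization code can call
 * into the allocators below.
 */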
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size == 0 || !pfree_head || !pused_head)
		return NULL;

	/* Align the size to a 4-byte boundary */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* search for an available piece slot */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* use atomic so our L1 allocator can be used atomically */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used piece list */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
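
/*
 * _sram_alloc() is a simple first-fit allocator: the request is rounded
 * up to a 4-byte multiple and the free list is walked until the first
 * piece large enough is found (splitting it if it is bigger than the
 * request).  For example, a 5-byte request is rounded up to 8 bytes
 * before the search starts.
 */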
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
		struct sram_piece *pused_head,
		unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
/* SRAM free function */
static int _sram_free(const void *addr,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the used list for the piece that matches addr */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the free list, kept in address order */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	/* merge with the preceding free piece if they are contiguous */
	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	/* merge with the following free piece if they are contiguous */
	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
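
/*
 * Because the free list stays address-sorted and contiguous neighbours
 * are merged on every free, fragmentation cannot persist across pieces
 * that have all been returned.
 */

/* free an allocation by dispatching on which SRAM region its address is in */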
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
	return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
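
/* try Data A first, then fall back to Data B if A is exhausted or absent */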
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;
	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory allocate function: grab the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take the per-cpu lock (IRQ-safe) */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}
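
/*
 * Note that the l1sram_* scratchpad helpers above carry no EXPORT_SYMBOL,
 * so unlike the other region allocators they are only callable from
 * built-in kernel code, not from loadable modules.
 */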
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	/* take the lock (IRQ-safe); L2 is shared by all CPUs */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	/* take the lock (IRQ-safe); L2 is shared by all CPUs */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;
	int ret = -1;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			lsl = *tmp;
			ret = sram_free(addr);
			*tmp = lsl->next;
			kfree(lsl);
			break;
		}

	return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in the L1 SRAM list (lsl) so that the
 * resources are tracked.  These are designed for userspace so that when
 * a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
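
/*
 * Example (a sketch, not code from this file): a caller that wants fast
 * on-chip memory and can live with any data region might do
 *
 *	void *buf = sram_alloc_with_lsl(512,
 *			L1_DATA_A_SRAM | L1_DATA_B_SRAM | L2_SRAM);
 *	if (buf) {
 *		...
 *		sram_free_with_lsl(buf);
 *	}
 *
 * The flag bits are tried in the fixed order seen above: L1 instruction,
 * L1 data A, L1 data B, then L2.
 */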
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep each line of output the same length.  Currently, that is
 * 44 bytes (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* dump the used pieces, then the free pieces */
	pslot = pused_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}
static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif