sram-alloc.c

/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and data SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
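
/*
 * Bookkeeping sketch (added for clarity, not in the original source):
 * each SRAM bank keeps two singly linked lists of sram_piece nodes
 * hanging off static dummy heads.  The free list is kept sorted by
 * ascending paddr so adjacent slots can be coalesced on free; the
 * used list records the owning pid of every live allocation.
 *
 *	free_head -> [paddr, size] -> [paddr, size] -> NULL
 *	used_head -> [paddr, size, pid] -> ... -> NULL
 */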
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the lock protecting this CPU's lists */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));

		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* initialize the locks protecting the per-CPU lists */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the lock protecting this CPU's lists */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock protecting the lists */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size == 0 || !pfree_head || !pused_head)
		return NULL;

	/* round the request up to a 4-byte multiple */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first fit: find the first free slot large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: unlink the whole slot */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* split: carve the request off the front of the slot */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used list (descending paddr) */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
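
/*
 * Worked example (added for illustration, not in the original source):
 * with a single 64-byte free slot at 0xffb00000, _sram_alloc(10, ...)
 * rounds the request up to 12 bytes, hands back a new 12-byte piece at
 * 0xffb00000 owned by current->pid, and shrinks the free slot to
 * 52 bytes starting at 0xffb0000c.
 */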
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
		struct sram_piece *pused_head,
		unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free slot */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
/* SRAM free function */
static int _sram_free(const void *addr,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the used list for the piece that owns this address */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the free list (ascending paddr) */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	/* coalesce with the preceding free slot if contiguous */
	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	/* coalesce with the following free slot if contiguous */
	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
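
/*
 * Worked example (added for illustration, not in the original source):
 * freeing a piece at 0xffb00000 when the free list already holds a
 * slot starting at 0xffb0000c merges the two into one larger slot at
 * 0xffb00000, so repeated alloc/free cycles do not fragment the bank.
 */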
/* dispatch the free to whichever SRAM bank owns the address */
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
	return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
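
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * placing a zeroed coefficient table in L1 data SRAM, falling back to
 * bank B automatically when bank A is full.
 *
 *	u32 *coeffs = l1_data_sram_zalloc(64 * sizeof(*coeffs));
 *	if (!coeffs)
 *		return -ENOMEM;
 *	...
 *	l1_data_sram_free(coeffs);
 */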
int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);

	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory allocate function: grab the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}
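
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	size_t len;
 *	void *scratch = l1sram_alloc_max(&len);
 *	if (scratch) {
 *		... use the len bytes granted at scratch ...
 *		l1sram_free(scratch);
 *	}
 */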
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, (unsigned long)size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep it in the L1 SRAM list (lsl) so that the
 * resources are tracked.  These are designed for userspace so that when
 * a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}

	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;

	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
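
/*
 * Usage sketch (illustrative, not part of the original file): request
 * L1 data with an L2 fallback; the first bank in the flag mask with
 * room satisfies the request, and the allocation is reaped at process
 * exit if the caller never frees it.
 *
 *	void *buf = sram_alloc_with_lsl(256, L1_DATA_A_SRAM | L2_SRAM);
 *	if (buf)
 *		sram_free_with_lsl(buf);
 */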
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Each line of output needs to stay the same length. Currently, that is
 * 44 bytes (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size PID State \n", desc);

	/* walk the used list, then the free list */
	pslot = pused_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");
		pslot = pslot->next;
	}

	pslot = pfree_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");
		pslot = pslot->next;
	}

	return 0;
}
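
/*
 * Illustrative /proc/sram output (hypothetical addresses; the exact
 * column widths come from the format strings above).  Each bank prints
 * a header followed by its used pieces, then its free pieces:
 *
 *	--- SRAM Scratchpad     Size PID State
 *	ffb00400-ffb00800       1024  42 ALLOCATED
 *	ffb00800-ffb01000       2048   0 FREE
 */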
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
not_done:
	return len;
}

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif