/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>		/* kmem_cache_*, kzalloc, kfree */
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};

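/*
 * Note on the lists (inferred from the code below): each SRAM region keeps
 * two singly linked lists of sram_piece nodes behind a dummy head.  The
 * free list is kept sorted by ascending paddr so that _sram_free() can
 * coalesce adjacent pieces; the used list records the owning pid of each
 * live allocation.
 */
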
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the lock protecting this CPU's scratchpad lists */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));

		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* initialize the locks protecting the per-cpu data SRAM lists */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the lock protecting this CPU's instruction lists */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock protecting the L2 SRAM lists */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);

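/*
 * pure_initcall is the earliest initcall level, so the piece lists built
 * above are ready before normal driver initcalls start calling into the
 * allocators below.
 */
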
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size == 0 || !pfree_head || !pused_head)
		return NULL;

	/* round the size up to a 4-byte multiple */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first-fit search for a free piece that is large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: move the whole piece to the used list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* split: carve the allocation off the front of the piece */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used piece list */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}

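/*
 * Worked example (addresses hypothetical): a request for 6 bytes is
 * rounded up to 8 by (size + 3) & ~3.  If the first large-enough free
 * piece is 32 bytes at 0xff800000, the caller gets 0xff800000 back and
 * the free piece shrinks to 24 bytes starting at 0xff800008.
 */
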
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
		struct sram_piece *pused_head,
		unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the used list for the piece that owns addr */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the piece back into the address-sorted free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	/* coalesce with the preceding free piece when they touch */
	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	/* coalesce with the following free piece when they touch */
	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}

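/*
 * Worked example (addresses hypothetical): freeing an 8-byte piece at
 * 0xff800008 while [0xff800000, 0xff800008) and [0xff800010, ...) are
 * both free merges all three into one free piece, first with the
 * predecessor and then with the successor.
 */
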
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
	/* try bank A first, then fall back to bank B */
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory allocate-max function: grab the largest free piece */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();

	/* the per-cpu lock protects the piece lists */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	/* the lock protects the L2 piece lists */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		(unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	/* the lock protects the L2 piece lists */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in the L1 SRAM List (lsl) so that the
 * resources are tracked.  These are designed for userspace so that when a
 * process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);

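/*
 * Example use from a hypothetical caller (error handling elided).  The
 * flags name the banks to try, in the fixed order inst -> A -> B -> L2:
 *
 *	void *buf = sram_alloc_with_lsl(512, L1_DATA_A_SRAM | L2_SRAM);
 *	if (buf)
 *		sram_free_with_lsl(buf);
 */
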
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Every line of output needs to be the same length.  Currently, that is
 * 44 bytes (including the newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list first, then the free list */
	pslot = pused_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;
	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}

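/*
 * Illustrative /proc/sram output for one region (addresses and sizes
 * made up; the real spacing comes from the format strings above):
 *
 *	--- SRAM Scratchpad     Size   PID State
 *	ffb00000-ffb00400       1024     0 FREE
 */
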
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
not_done:
	return len;
}

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif