/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of this macro is an EMU page (4096 bytes), not a
 * kernel-aligned page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
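
/*
 * Annotation (mine, not an original comment): each 32-bit PTB entry packs
 * the page's DMA address, shifted left by one, together with the EMU page
 * index in the low bits.  That packing is what the macro above literally
 * encodes; the hardware's exact interpretation of the bit split is a
 * property of the EMU10K1 chip and is not documented here.
 */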

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES         (MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)
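
/*
 * Worked example (illustrative): with EMUPAGESIZE = 4096 and a kernel
 * PAGE_SIZE of 16384, UNIT_PAGES is 4, so one kernel page covers four
 * consecutive PTB entries and MAX_ALIGN_PAGES shrinks to MAXPAGES / 4.
 * With 4096-byte kernel pages the two page units coincide and the
 * simpler macros in the #if branch below are used.
 */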

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* do not increment the silent page address */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */

static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its first page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each(pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                } else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = MAX_ALIGN_PAGES - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}
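
/*
 * Behaviour sketch (my summary, not an original comment): the scan walks
 * the address-ordered mapped list.  A hole whose size matches npages
 * exactly is returned immediately; otherwise the largest hole seen --
 * including the tail gap after the last mapped block -- is chosen, and
 * -ENOMEM is returned if no hole of at least npages pages exists.
 */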

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block in the proper position of the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = MAX_ALIGN_PAGES;

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}
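
/*
 * Illustration (hypothetical numbers, mine): the returned hole spans from
 * the end of the previous mapped neighbour to the start of the next one,
 * so unmapping a 3-page block that has 1 free page before it and 2 free
 * pages after it reports a 6-page hole -- the freed pages merged with
 * both adjacent gaps.
 */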

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc, the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}

/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                           emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE - 1)) {
                snd_printk(KERN_ERR "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_del(&blk->mapped_order_link);
                list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks,
                 * starting from the oldest block
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* OK, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;
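
        /* My reading (not an original comment): when a single period spans
         * the whole buffer, extra bytes are reserved so the delayed period
         * IRQ workaround (delay_pcm_irq) has room past the buffer end; the
         * factor of two appears to account for 16-bit samples.
         */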
        idx = runtime->period_size >= runtime->buffer_size ?
                (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses; the page pointers are not stored, so that
         * the pages are not freed again in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (!is_valid_page(emu, addr)) {
                        printk(KERN_ERR "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}
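
/*
 * Usage note (mine, hedged): in the emu10k1 driver this is typically
 * called from the PCM hw_params path to wire a scatter-gather DMA buffer
 * into the PTB; the matching release goes through snd_emu10k1_free_pages()
 * below.
 */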

/*
 * release the DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* calculate the page range that the given block exclusively owns;
 * pages shared with neighbouring blocks are excluded
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;

        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--;  /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        int page;

        for (page = first_page; page <= last_page; page++) {
                free_page((unsigned long)emu->page_ptr_table[page]);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                /* first try to allocate from <4GB zone */
                struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
                                            __GFP_NOWARN);
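                /* My annotation: the mask test below checks whether the
                 * page's frame number exceeds what the card's DMA mask can
                 * address; if so, the page is returned and a second attempt
                 * is made from the <16MB GFP_DMA zone.
                 */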
                if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
                        if (p)
                                __free_page(p);
                        /* try to allocate from <16MB zone */
                        p = alloc_page(GFP_ATOMIC | GFP_DMA |
                                       __GFP_NORETRY | /* no OOM-killer */
                                       __GFP_NOWARN);
                }
                if (!p) {
                        __synth_free_pages(emu, first_page, page - 1);
                        return -ENOMEM;
                }
                emu->page_addr_table[page] = page_to_phys(p);
                emu->page_ptr_table[page] = page_address(p);
        }
        return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;

        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (!ptr) {
                printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
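
/*
 * Usage sketch (illustrative, not part of the original file): a wavetable
 * loader would typically pair the exported helpers like this --
 *
 *      struct snd_util_memblk *blk;
 *
 *      blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *      if (!blk)
 *              return -ENOMEM;
 *      if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, user_buf,
 *                                           sample_bytes) < 0) {
 *              snd_emu10k1_synth_free(emu, blk);
 *              return -EFAULT;
 *      }
 *      ...
 *      snd_emu10k1_synth_free(emu, blk);
 *
 * where emu, sample_bytes, and user_buf are assumed to come from the
 * caller's context.
 */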