memory.c
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page arguments of these macros refer to Emu pages (4096 bytes),
 * not to the OS-aligned pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))

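/*
 * Each 32-bit PTB entry packs the EMUPAGESIZE-aligned DMA address
 * (shifted left by one) together with the index of the entry itself in
 * the low bits.  Since a 4096-byte-aligned address shifted left by one
 * has its low 13 bits clear, the two fields cannot collide; e.g.
 * addr = 0x12345000 and page = 3 yield the entry 0x2468a003.
 */
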
#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES         (MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)

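/*
 * Example: with 8 kB OS pages, UNIT_PAGES is 2, so each aligned page
 * occupies two consecutive PTB entries and only MAXPAGES / 2 aligned
 * pages fit into the page table.
 */
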
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to a page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to a page with the silent-page pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;

        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;

        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* all entries point at the same silent page: do not advance addr */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */

static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1-specific part of a memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return the start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
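/*
 * The mapped list is kept sorted by mapped_page, so all holes between
 * consecutive blocks (plus the tail gap up to MAX_ALIGN_PAGES) can be
 * examined in a single pass.  A hole that fits npages exactly is taken
 * immediately; otherwise the largest hole found so far is remembered
 * and used as a fallback.
 */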
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each(pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                } else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = MAX_ALIGN_PAGES - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region
 *
 * call with memblk_lock held
 */
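/*
 * The returned size spans from the end of the previous mapped block to
 * the start of the next one, so it also includes any holes that already
 * bordered the unmapped block; snd_emu10k1_memblk_map() compares this
 * value against blk->pages when it evicts blocks to make room.
 */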
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = MAX_ALIGN_PAGES;

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc, the memory block is aligned to the page start
 */
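/*
 * The block list in the memhdr is sorted by offset, so a single walk
 * finds the first gap of at least psize aligned pages; the requested
 * byte size is rounded up to whole aligned pages beforehand.
 */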
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}

/*
 * check if the given DMA address is valid as a page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                           emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE - 1)) {
                snd_printk(KERN_ERR "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
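/*
 * Eviction below walks mapped_order_link_head from its head, i.e. from
 * the least recently mapped block, and skips blocks whose map_locked
 * flag is set (such as active PCM buffers).  After each unmap, the size
 * of the freed region is checked and the mapping is retried once the
 * region is large enough.
 */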
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_del(&blk->mapped_order_link);
                list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks,
                 * starting from the oldest one
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* ok, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;
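
        /*
         * When a single period covers the whole buffer, pad the block so
         * that the period interrupt can be delayed slightly (see the
         * driver's delay_pcm_irq handling); the factor of two below is
         * taken from the code as-is -- the exact unit of delay_pcm_irq
         * is not documented here.
         */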
        idx = runtime->period_size >= runtime->buffer_size ?
                                        (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill the buffer addresses, but the page pointers are not stored
         * so that snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (!is_valid_page(emu, addr)) {
                        printk(KERN_ERR "emu: failure page = %d\n", idx);
                        __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* Calculate the page range that this block newly requires; boundary
 * pages shared with a neighbouring block are already allocated and are
 * skipped.
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;

        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--;  /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        int page;

        for (page = first_page; page <= last_page; page++) {
                free_page((unsigned long)emu->page_ptr_table[page]);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}

/*
 * allocate kernel pages
 */
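/*
 * The card can only address DMA memory below emu->dma_mask, so each
 * page is first requested from the <4 GB zone and its page frame number
 * checked against the mask; a page above the mask is released and
 * retried from the <16 MB DMA zone, which is assumed to always satisfy
 * the mask.
 */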
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                /* first try to allocate from <4GB zone */
                struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
                                            __GFP_NOWARN);
                if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
                        if (p)
                                __free_page(p);
                        /* try to allocate from <16MB zone */
                        p = alloc_page(GFP_ATOMIC | GFP_DMA |
                                       __GFP_NORETRY | /* no OOM-killer */
                                       __GFP_NOWARN);
                }
                if (!p) {
                        __synth_free_pages(emu, first_page, page - 1);
                        return -ENOMEM;
                }
                emu->page_addr_table[page] = page_to_phys(p);
                emu->page_ptr_table[page] = page_address(p);
        }
        return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;

        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (!ptr) {
                printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
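/*
 * The loop below walks the region one aligned page at a time: temp is
 * the number of bytes left in the current page, temp1 the number of
 * bytes left in the whole request, and the smaller of the two is
 * cleared before advancing to the next page boundary.
 */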
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
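/*
 * Same page-by-page walk as in snd_emu10k1_synth_bzero() above.  Note
 * that a fault from copy_from_user() aborts the transfer immediately,
 * so earlier pages may already hold partially copied data when -EFAULT
 * is returned.
 */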
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);