core-iso.c

/*
 * Isochronous I/O functionality:
 *   - Isochronous DMA context management
 *   - Isochronous bus resource management (channels, bandwidth), client side
 *
 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>

#include "core.h"

/*
 * Isochronous DMA context management
 */
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
                       int page_count, enum dma_data_direction direction)
{
        int i, j;
        dma_addr_t address;

        buffer->page_count = page_count;
        buffer->direction = direction;

        buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
                                GFP_KERNEL);
        if (buffer->pages == NULL)
                goto out;

        for (i = 0; i < buffer->page_count; i++) {
                buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 |
                                              __GFP_ZERO);
                if (buffer->pages[i] == NULL)
                        goto out_pages;

                address = dma_map_page(card->device, buffer->pages[i],
                                       0, PAGE_SIZE, direction);
                if (dma_mapping_error(card->device, address)) {
                        __free_page(buffer->pages[i]);
                        goto out_pages;
                }
                set_page_private(buffer->pages[i], address);
        }

        return 0;

 out_pages:
        for (j = 0; j < i; j++) {
                address = page_private(buffer->pages[j]);
                dma_unmap_page(card->device, address,
                               PAGE_SIZE, direction);
                __free_page(buffer->pages[j]);
        }
        kfree(buffer->pages);
 out:
        buffer->pages = NULL;

        return -ENOMEM;
}
EXPORT_SYMBOL(fw_iso_buffer_init);

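/* Map the buffer's pages into the given userspace VMA, e.g. for a client's mmap(). */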
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
        unsigned long uaddr;
        int i, err;

        uaddr = vma->vm_start;
        for (i = 0; i < buffer->page_count; i++) {
                err = vm_insert_page(vma, uaddr, buffer->pages[i]);
                if (err)
                        return err;

                uaddr += PAGE_SIZE;
        }

        return 0;
}

void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
                           struct fw_card *card)
{
        int i;
        dma_addr_t address;

        for (i = 0; i < buffer->page_count; i++) {
                address = page_private(buffer->pages[i]);
                dma_unmap_page(card->device, address,
                               PAGE_SIZE, buffer->direction);
                __free_page(buffer->pages[i]);
        }

        kfree(buffer->pages);
        buffer->pages = NULL;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);

/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
        int i;
        dma_addr_t address;
        ssize_t offset;

        for (i = 0; i < buffer->page_count; i++) {
                address = page_private(buffer->pages[i]);
                offset = (ssize_t)completed - (ssize_t)address;
                if (offset > 0 && offset <= PAGE_SIZE)
                        return (i << PAGE_SHIFT) + offset;
        }

        return 0;
}

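/*
 * Allocate a hardware iso DMA context from the card driver and initialize
 * the fields common to all context types.  Returns an ERR_PTR() on failure.
 */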
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
                int type, int channel, int speed, size_t header_size,
                fw_iso_callback_t callback, void *callback_data)
{
        struct fw_iso_context *ctx;

        ctx = card->driver->allocate_iso_context(card,
                                                 type, channel, header_size);
        if (IS_ERR(ctx))
                return ctx;

        ctx->card = card;
        ctx->type = type;
        ctx->channel = channel;
        ctx->speed = speed;
        ctx->header_size = header_size;
        ctx->callback.sc = callback;
        ctx->callback_data = callback_data;

        return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);

void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
        ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);

int fw_iso_context_start(struct fw_iso_context *ctx,
                         int cycle, int sync, int tags)
{
        return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);

int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
        return ctx->card->driver->set_iso_channels(ctx, channels);
}

int fw_iso_context_queue(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload)
{
        return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);

int fw_iso_context_stop(struct fw_iso_context *ctx)
{
        return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);

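/*
 * Illustrative sketch only, not part of this file's API: one way a client
 * driver might combine the context and buffer helpers above for transmit.
 * example_iso_callback(), example_transmit(), the single 64-byte packet and
 * the header_size of 0 are placeholders chosen for brevity.
 */
static void __maybe_unused example_iso_callback(struct fw_iso_context *ctx,
                u32 cycle, size_t header_length, void *header, void *data)
{
        /* A real driver would queue further packets here. */
}

static int __maybe_unused example_transmit(struct fw_card *card, int channel,
                                           struct fw_iso_buffer *buffer,
                                           struct fw_iso_context **ctx_out)
{
        struct fw_iso_context *ctx;
        struct fw_iso_packet packet = {
                .payload_length = 64,
                .interrupt      = 1,
        };
        int err;

        ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_TRANSMIT, channel,
                                    SCODE_100, 0, example_iso_callback, NULL);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = fw_iso_buffer_init(buffer, card, 1, DMA_TO_DEVICE);
        if (err < 0)
                goto out_context;

        err = fw_iso_context_queue(ctx, &packet, buffer, 0);
        if (err < 0)
                goto out_buffer;

        /* cycle = -1: start as soon as possible; sync/tags matter only for rx. */
        err = fw_iso_context_start(ctx, -1, 0, 0);
        if (err < 0)
                goto out_buffer;

        /*
         * The caller later tears the stream down with fw_iso_context_stop(),
         * fw_iso_buffer_destroy() and fw_iso_context_destroy().
         */
        *ctx_out = ctx;
        return 0;

 out_buffer:
        fw_iso_buffer_destroy(buffer, card);
 out_context:
        fw_iso_context_destroy(ctx);
        return err;
}
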
/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */

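/*
 * Allocate or release bandwidth by compare-swapping the IRM's
 * BANDWIDTH_AVAILABLE register.  Returns the amount that was (de)allocated
 * on success, or a negative error code.
 */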
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
                            int bandwidth, bool allocate, __be32 data[2])
{
        int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;

        /*
         * On a 1394a IRM with low contention, try < 1 is enough.
         * On a 1394-1995 IRM, we need at least try < 2.
         * Let's just do try < 5.
         */
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
                        break;

                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
                                data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all bandwidth. */
                        return allocate ? -EAGAIN : bandwidth;

                case RCODE_COMPLETE:
                        if (be32_to_cpup(data) == old)
                                return bandwidth;

                        old = be32_to_cpup(data);
                        /* Fall through. */
                }
        }

        return -EIO;
}

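/*
 * Allocate or release one channel out of channels_mask by compare-swapping
 * single bits of the 32-bit CHANNELS_AVAILABLE register at @offset.
 * Returns the (de)allocated channel number relative to @offset, or a
 * negative error code.
 */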
static int manage_channel(struct fw_card *card, int irm_id, int generation,
                u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
        __be32 c, all, old;
        int i, retry = 5;

        old = all = allocate ? cpu_to_be32(~0) : 0;

        for (i = 0; i < 32; i++) {
                if (!(channels_mask & 1 << i))
                        continue;

                c = cpu_to_be32(1 << (31 - i));
                if ((old & c) != (all & c))
                        continue;

                data[0] = old;
                data[1] = old ^ c;
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                           irm_id, generation, SCODE_100,
                                           offset, data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all channels. */
                        return allocate ? -EAGAIN : i;

                case RCODE_COMPLETE:
                        if (data[0] == old)
                                return i;

                        old = data[0];

                        /* Is the IRM 1394a-2000 compliant? */
                        if ((data[0] & c) == (data[1] & c))
                                continue;

                        /* 1394-1995 IRM, fall through to retry. */
                default:
                        if (retry--)
                                i--;
                }
        }

        return -EIO;
}

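/* Release a single channel again, e.g. to roll back a partial allocation. */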
static void deallocate_channel(struct fw_card *card, int irm_id,
                               int generation, int channel, __be32 buffer[2])
{
        u32 mask;
        u64 offset;

        mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
        offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

        manage_channel(card, irm_id, generation, mask, offset, false, buffer);
}

/**
 * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
                            bool allocate, __be32 buffer[2])
{
        u32 channels_hi = channels_mask;        /* channels 31...0 */
        u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
        int irm_id, ret, c = -EINVAL;

        spin_lock_irq(&card->lock);
        irm_id = card->irm_node->node_id;
        spin_unlock_irq(&card->lock);

        if (channels_hi)
                c = manage_channel(card, irm_id, generation, channels_hi,
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
                                allocate, buffer);
        if (channels_lo && c < 0) {
                c = manage_channel(card, irm_id, generation, channels_lo,
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
                                allocate, buffer);
                if (c >= 0)
                        c += 32;
        }
        *channel = c;

        if (allocate && channels_mask != 0 && c < 0)
                *bandwidth = 0;

        if (*bandwidth == 0)
                return;

        ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
                               allocate, buffer);
        if (ret < 0)
                *bandwidth = 0;

        if (allocate && ret < 0) {
                if (c >= 0)
                        deallocate_channel(card, irm_id, generation, c, buffer);
                *channel = ret;
        }
}
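
/*
 * Illustrative sketch only, not part of this file: requesting channel 5 or 6
 * plus some bandwidth, following the kernel-doc comment above.  The function
 * name, the channel mask and the bandwidth value are placeholders.
 */
static bool __maybe_unused example_allocate_resources(struct fw_card *card,
                int generation, int *channel, int *bandwidth)
{
        __be32 buffer[2];

        *bandwidth = 2400;      /* in IEEE 1394 bandwidth allocation units */
        fw_iso_resource_manage(card, generation, (1ULL << 5) | (1ULL << 6),
                               channel, bandwidth, true, buffer);

        /* *channel == -EAGAIN: generation was stale, retry after bus reset. */
        return *channel >= 0 && *bandwidth > 0;
}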