/* dma.c */
  1. /*****************************************************************************
  2. * Copyright 2004 - 2008 Broadcom Corporation. All rights reserved.
  3. *
  4. * Unless you and Broadcom execute a separate written software license
  5. * agreement governing use of this software, this software is licensed to you
  6. * under the terms of the GNU General Public License version 2, available at
  7. * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
  8. *
  9. * Notwithstanding the above, under no circumstances may you combine this
  10. * software in any way with any other Broadcom software provided under a
  11. * license other than the GPL, without Broadcom's express prior written
  12. * consent.
  13. *****************************************************************************/
  14. /****************************************************************************/
  15. /**
  16. * @file dma.c
  17. *
  18. * @brief Implements the DMA interface.
  19. */
  20. /****************************************************************************/
  21. /* ---- Include Files ---------------------------------------------------- */
  22. #include <linux/module.h>
  23. #include <linux/device.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/sched.h>
  27. #include <linux/irqreturn.h>
  28. #include <linux/proc_fs.h>
  29. #include <linux/slab.h>
  30. #include <mach/timer.h>
  31. #include <linux/pfn.h>
  32. #include <linux/atomic.h>
  33. #include <linux/sched.h>
  34. #include <mach/dma.h>
  35. /* ---- Public Variables ------------------------------------------------- */
  36. /* ---- Private Constants and Types -------------------------------------- */
  37. #define MAKE_HANDLE(controllerIdx, channelIdx) (((controllerIdx) << 4) | (channelIdx))
  38. #define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
  39. #define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)
  40. /* ---- Private Variables ------------------------------------------------ */
  41. static DMA_Global_t gDMA;
  42. static struct proc_dir_entry *gDmaDir;
  43. #include "dma_device.c"
  44. /* ---- Private Function Prototypes -------------------------------------- */
  45. /* ---- Functions ------------------------------------------------------- */
/****************************************************************************/
/**
*   Displays information for /proc/dma/channels.
*
*   Legacy read_proc-style callback: formats one line per DMA channel into
*   @buf showing dedicated/shared status, ISR usage, FIFO size, and the
*   current/last device using the channel.  Returns the number of bytes
*   written, or -ERESTARTSYS if interrupted while waiting for gDMA.lock.
*/
/****************************************************************************/
static int dma_proc_read_channels(char *buf, char **start, off_t offset,
				  int count, int *eof, void *data)
{
	int controllerIdx;
	int channelIdx;
	/* Reserve ~200 bytes of headroom so a full output line always fits. */
	int limit = count - 200;
	int len = 0;
	DMA_Channel_t *channel;

	/* Serialize against channel reservation/release. */
	if (down_interruptible(&gDMA.lock) < 0) {
		return -ERESTARTSYS;
	}

	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
	     controllerIdx++) {
		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
		     channelIdx++) {
			/* NOTE(review): this break only exits the inner loop;
			 * the outer loop re-enters and re-checks per channel,
			 * so output simply stops growing once near the limit. */
			if (len >= limit) {
				break;
			}

			channel =
			    &gDMA.controller[controllerIdx].channel[channelIdx];

			len +=
			    sprintf(buf + len, "%d:%d ", controllerIdx,
				    channelIdx);

			if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
			    0) {
				len +=
				    sprintf(buf + len, "Dedicated for %s ",
					    DMA_gDeviceAttribute[channel->
								 devType].name);
			} else {
				len += sprintf(buf + len, "Shared ");
			}

			if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) {
				len += sprintf(buf + len, "No ISR ");
			}

			if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) {
				len += sprintf(buf + len, "Fifo: 128 ");
			} else {
				len += sprintf(buf + len, "Fifo: 64 ");
			}

			if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
				len +=
				    sprintf(buf + len, "InUse by %s",
					    DMA_gDeviceAttribute[channel->
								 devType].name);
#if (DMA_DEBUG_TRACK_RESERVATION)
				/* When reservation tracking is on, show where
				 * the channel was requested from. */
				len +=
				    sprintf(buf + len, " (%s:%d)",
					    channel->fileName,
					    channel->lineNum);
#endif
			} else {
				len += sprintf(buf + len, "Avail ");
			}

			if (channel->lastDevType != DMA_DEVICE_NONE) {
				len +=
				    sprintf(buf + len, "Last use: %s ",
					    DMA_gDeviceAttribute[channel->
								 lastDevType].
				    name);
			}

			len += sprintf(buf + len, "\n");
		}
	}
	up(&gDMA.lock);
	*eof = 1;	/* single-shot read: everything fits in one call */

	return len;
}
/****************************************************************************/
/**
*   Displays information for /proc/dma/devices.
*
*   Legacy read_proc-style callback: formats one line per entry in
*   DMA_gDeviceAttribute showing the device's channel assignment
*   (dedicated or shared), flags, and cumulative transfer statistics.
*   Returns the number of bytes written, or -ERESTARTSYS if interrupted
*   while waiting for gDMA.lock.
*/
/****************************************************************************/
static int dma_proc_read_devices(char *buf, char **start, off_t offset,
				 int count, int *eof, void *data)
{
	/* Reserve ~200 bytes of headroom so a full output line always fits. */
	int limit = count - 200;
	int len = 0;
	int devIdx;

	if (down_interruptible(&gDMA.lock) < 0) {
		return -ERESTARTSYS;
	}

	for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
		DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];

		/* Skip unnamed (unused) table slots. */
		if (devAttr->name == NULL) {
			continue;
		}

		if (len >= limit) {
			break;
		}

		len += sprintf(buf + len, "%-12s ", devAttr->name);

		if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
			len +=
			    sprintf(buf + len, "Dedicated %d:%d ",
				    devAttr->dedicatedController,
				    devAttr->dedicatedChannel);
		} else {
			/* Shared device: list which controllers it may use. */
			len += sprintf(buf + len, "Shared DMA:");
			if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) {
				len += sprintf(buf + len, "0");
			}
			if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) {
				len += sprintf(buf + len, "1");
			}
			len += sprintf(buf + len, " ");
		}

		if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) {
			len += sprintf(buf + len, "NoISR ");
		}
		if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) {
			len += sprintf(buf + len, "Allow-128 ");
		}

		len +=
		    sprintf(buf + len,
			    "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n",
			    devAttr->numTransfers, devAttr->transferTicks,
			    devAttr->transferBytes,
			    devAttr->ring.bytesAllocated);
	}
	up(&gDMA.lock);
	*eof = 1;	/* single-shot read: everything fits in one call */

	return len;
}
  174. /****************************************************************************/
  175. /**
  176. * Determines if a DMA_Device_t is "valid".
  177. *
  178. * @return
  179. * TRUE - dma device is valid
  180. * FALSE - dma device isn't valid
  181. */
  182. /****************************************************************************/
  183. static inline int IsDeviceValid(DMA_Device_t device)
  184. {
  185. return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES);
  186. }
  187. /****************************************************************************/
  188. /**
  189. * Translates a DMA handle into a pointer to a channel.
  190. *
  191. * @return
  192. * non-NULL - pointer to DMA_Channel_t
  193. * NULL - DMA Handle was invalid
  194. */
  195. /****************************************************************************/
  196. static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle)
  197. {
  198. int controllerIdx;
  199. int channelIdx;
  200. controllerIdx = CONTROLLER_FROM_HANDLE(handle);
  201. channelIdx = CHANNEL_FROM_HANDLE(handle);
  202. if ((controllerIdx > DMA_NUM_CONTROLLERS)
  203. || (channelIdx > DMA_NUM_CHANNELS)) {
  204. return NULL;
  205. }
  206. return &gDMA.controller[controllerIdx].channel[channelIdx];
  207. }
  208. /****************************************************************************/
  209. /**
  210. * Interrupt handler which is called to process DMA interrupts.
  211. */
  212. /****************************************************************************/
  213. static irqreturn_t dma_interrupt_handler(int irq, void *dev_id)
  214. {
  215. DMA_Channel_t *channel;
  216. DMA_DeviceAttribute_t *devAttr;
  217. int irqStatus;
  218. channel = (DMA_Channel_t *) dev_id;
  219. /* Figure out why we were called, and knock down the interrupt */
  220. irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle);
  221. dmacHw_clearInterrupt(channel->dmacHwHandle);
  222. if ((channel->devType < 0)
  223. || (channel->devType > DMA_NUM_DEVICE_ENTRIES)) {
  224. printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n",
  225. channel->devType);
  226. return IRQ_NONE;
  227. }
  228. devAttr = &DMA_gDeviceAttribute[channel->devType];
  229. /* Update stats */
  230. if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) {
  231. devAttr->transferTicks +=
  232. (timer_get_tick_count() - devAttr->transferStartTime);
  233. }
  234. if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) {
  235. printk(KERN_ERR
  236. "dma_interrupt_handler: devType :%d DMA error (%s)\n",
  237. channel->devType, devAttr->name);
  238. } else {
  239. devAttr->numTransfers++;
  240. devAttr->transferBytes += devAttr->numBytes;
  241. }
  242. /* Call any installed handler */
  243. if (devAttr->devHandler != NULL) {
  244. devAttr->devHandler(channel->devType, irqStatus,
  245. devAttr->userData);
  246. }
  247. return IRQ_HANDLED;
  248. }
  249. /****************************************************************************/
  250. /**
  251. * Allocates memory to hold a descriptor ring. The descriptor ring then
  252. * needs to be populated by making one or more calls to
  253. * dna_add_descriptors.
  254. *
  255. * The returned descriptor ring will be automatically initialized.
  256. *
  257. * @return
  258. * 0 Descriptor ring was allocated successfully
  259. * -EINVAL Invalid parameters passed in
  260. * -ENOMEM Unable to allocate memory for the desired number of descriptors.
  261. */
  262. /****************************************************************************/
  263. int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to populate */
  264. int numDescriptors /* Number of descriptors that need to be allocated. */
  265. ) {
  266. size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors);
  267. if ((ring == NULL) || (numDescriptors <= 0)) {
  268. return -EINVAL;
  269. }
  270. ring->physAddr = 0;
  271. ring->descriptorsAllocated = 0;
  272. ring->bytesAllocated = 0;
  273. ring->virtAddr = dma_alloc_writecombine(NULL,
  274. bytesToAlloc,
  275. &ring->physAddr,
  276. GFP_KERNEL);
  277. if (ring->virtAddr == NULL) {
  278. return -ENOMEM;
  279. }
  280. ring->bytesAllocated = bytesToAlloc;
  281. ring->descriptorsAllocated = numDescriptors;
  282. return dma_init_descriptor_ring(ring, numDescriptors);
  283. }
  284. EXPORT_SYMBOL(dma_alloc_descriptor_ring);
  285. /****************************************************************************/
  286. /**
  287. * Releases the memory which was previously allocated for a descriptor ring.
  288. */
  289. /****************************************************************************/
  290. void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring /* Descriptor to release */
  291. ) {
  292. if (ring->virtAddr != NULL) {
  293. dma_free_writecombine(NULL,
  294. ring->bytesAllocated,
  295. ring->virtAddr, ring->physAddr);
  296. }
  297. ring->bytesAllocated = 0;
  298. ring->descriptorsAllocated = 0;
  299. ring->virtAddr = NULL;
  300. ring->physAddr = 0;
  301. }
  302. EXPORT_SYMBOL(dma_free_descriptor_ring);
  303. /****************************************************************************/
  304. /**
  305. * Initializes a descriptor ring, so that descriptors can be added to it.
  306. * Once a descriptor ring has been allocated, it may be reinitialized for
  307. * use with additional/different regions of memory.
  308. *
  309. * Note that if 7 descriptors are allocated, it's perfectly acceptable to
  310. * initialize the ring with a smaller number of descriptors. The amount
  311. * of memory allocated for the descriptor ring will not be reduced, and
  312. * the descriptor ring may be reinitialized later
  313. *
  314. * @return
  315. * 0 Descriptor ring was initialized successfully
  316. * -ENOMEM The descriptor which was passed in has insufficient space
  317. * to hold the desired number of descriptors.
  318. */
  319. /****************************************************************************/
  320. int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to initialize */
  321. int numDescriptors /* Number of descriptors to initialize. */
  322. ) {
  323. if (ring->virtAddr == NULL) {
  324. return -EINVAL;
  325. }
  326. if (dmacHw_initDescriptor(ring->virtAddr,
  327. ring->physAddr,
  328. ring->bytesAllocated, numDescriptors) < 0) {
  329. printk(KERN_ERR
  330. "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n");
  331. return -ENOMEM;
  332. }
  333. return 0;
  334. }
  335. EXPORT_SYMBOL(dma_init_descriptor_ring);
  336. /****************************************************************************/
  337. /**
  338. * Determines the number of descriptors which would be required for a
  339. * transfer of the indicated memory region.
  340. *
  341. * This function also needs to know which DMA device this transfer will
  342. * be destined for, so that the appropriate DMA configuration can be retrieved.
  343. * DMA parameters such as transfer width, and whether this is a memory-to-memory
  344. * or memory-to-peripheral, etc can all affect the actual number of descriptors
  345. * required.
  346. *
  347. * @return
  348. * > 0 Returns the number of descriptors required for the indicated transfer
  349. * -ENODEV - Device handed in is invalid.
  350. * -EINVAL Invalid parameters
  351. * -ENOMEM Memory exhausted
  352. */
  353. /****************************************************************************/
  354. int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */
  355. dma_addr_t srcData, /* Place to get data to write to device */
  356. dma_addr_t dstData, /* Pointer to device data address */
  357. size_t numBytes /* Number of bytes to transfer to the device */
  358. ) {
  359. int numDescriptors;
  360. DMA_DeviceAttribute_t *devAttr;
  361. if (!IsDeviceValid(device)) {
  362. return -ENODEV;
  363. }
  364. devAttr = &DMA_gDeviceAttribute[device];
  365. numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
  366. (void *)srcData,
  367. (void *)dstData,
  368. numBytes);
  369. if (numDescriptors < 0) {
  370. printk(KERN_ERR
  371. "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n");
  372. return -EINVAL;
  373. }
  374. return numDescriptors;
  375. }
  376. EXPORT_SYMBOL(dma_calculate_descriptor_count);
  377. /****************************************************************************/
  378. /**
  379. * Adds a region of memory to the descriptor ring. Note that it may take
  380. * multiple descriptors for each region of memory. It is the callers
  381. * responsibility to allocate a sufficiently large descriptor ring.
  382. *
  383. * @return
  384. * 0 Descriptors were added successfully
  385. * -ENODEV Device handed in is invalid.
  386. * -EINVAL Invalid parameters
  387. * -ENOMEM Memory exhausted
  388. */
  389. /****************************************************************************/
  390. int dma_add_descriptors(DMA_DescriptorRing_t *ring, /* Descriptor ring to add descriptors to */
  391. DMA_Device_t device, /* DMA Device that descriptors are for */
  392. dma_addr_t srcData, /* Place to get data (memory or device) */
  393. dma_addr_t dstData, /* Place to put data (memory or device) */
  394. size_t numBytes /* Number of bytes to transfer to the device */
  395. ) {
  396. int rc;
  397. DMA_DeviceAttribute_t *devAttr;
  398. if (!IsDeviceValid(device)) {
  399. return -ENODEV;
  400. }
  401. devAttr = &DMA_gDeviceAttribute[device];
  402. rc = dmacHw_setDataDescriptor(&devAttr->config,
  403. ring->virtAddr,
  404. (void *)srcData,
  405. (void *)dstData, numBytes);
  406. if (rc < 0) {
  407. printk(KERN_ERR
  408. "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n",
  409. rc);
  410. return -ENOMEM;
  411. }
  412. return 0;
  413. }
  414. EXPORT_SYMBOL(dma_add_descriptors);
  415. /****************************************************************************/
  416. /**
  417. * Sets the descriptor ring associated with a device.
  418. *
  419. * Once set, the descriptor ring will be associated with the device, even
  420. * across channel request/free calls. Passing in a NULL descriptor ring
  421. * will release any descriptor ring currently associated with the device.
  422. *
  423. * Note: If you call dma_transfer, or one of the other dma_alloc_ functions
  424. * the descriptor ring may be released and reallocated.
  425. *
  426. * Note: This function will release the descriptor memory for any current
  427. * descriptor ring associated with this device.
  428. *
  429. * @return
  430. * 0 Descriptors were added successfully
  431. * -ENODEV Device handed in is invalid.
  432. */
  433. /****************************************************************************/
  434. int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. */
  435. DMA_DescriptorRing_t *ring /* Descriptor ring to add descriptors to */
  436. ) {
  437. DMA_DeviceAttribute_t *devAttr;
  438. if (!IsDeviceValid(device)) {
  439. return -ENODEV;
  440. }
  441. devAttr = &DMA_gDeviceAttribute[device];
  442. /* Free the previously allocated descriptor ring */
  443. dma_free_descriptor_ring(&devAttr->ring);
  444. if (ring != NULL) {
  445. /* Copy in the new one */
  446. devAttr->ring = *ring;
  447. }
  448. /* Set things up so that if dma_transfer is called then this descriptor */
  449. /* ring will get freed. */
  450. devAttr->prevSrcData = 0;
  451. devAttr->prevDstData = 0;
  452. devAttr->prevNumBytes = 0;
  453. return 0;
  454. }
  455. EXPORT_SYMBOL(dma_set_device_descriptor_ring);
  456. /****************************************************************************/
  457. /**
  458. * Retrieves the descriptor ring associated with a device.
  459. *
  460. * @return
  461. * 0 Descriptors were added successfully
  462. * -ENODEV Device handed in is invalid.
  463. */
  464. /****************************************************************************/
  465. int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */
  466. DMA_DescriptorRing_t *ring /* Place to store retrieved ring */
  467. ) {
  468. DMA_DeviceAttribute_t *devAttr;
  469. memset(ring, 0, sizeof(*ring));
  470. if (!IsDeviceValid(device)) {
  471. return -ENODEV;
  472. }
  473. devAttr = &DMA_gDeviceAttribute[device];
  474. *ring = devAttr->ring;
  475. return 0;
  476. }
  477. EXPORT_SYMBOL(dma_get_device_descriptor_ring);
  478. /****************************************************************************/
  479. /**
  480. * Configures a DMA channel.
  481. *
  482. * @return
  483. * >= 0 - Initialization was successful.
  484. *
  485. * -EBUSY - Device is currently being used.
  486. * -ENODEV - Device handed in is invalid.
  487. */
  488. /****************************************************************************/
  489. static int ConfigChannel(DMA_Handle_t handle)
  490. {
  491. DMA_Channel_t *channel;
  492. DMA_DeviceAttribute_t *devAttr;
  493. int controllerIdx;
  494. channel = HandleToChannel(handle);
  495. if (channel == NULL) {
  496. return -ENODEV;
  497. }
  498. devAttr = &DMA_gDeviceAttribute[channel->devType];
  499. controllerIdx = CONTROLLER_FROM_HANDLE(handle);
  500. if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) {
  501. if (devAttr->config.transferType ==
  502. dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) {
  503. devAttr->config.dstPeripheralPort =
  504. devAttr->dmacPort[controllerIdx];
  505. } else if (devAttr->config.transferType ==
  506. dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) {
  507. devAttr->config.srcPeripheralPort =
  508. devAttr->dmacPort[controllerIdx];
  509. }
  510. }
  511. if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) {
  512. printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n");
  513. return -EIO;
  514. }
  515. return 0;
  516. }
/****************************************************************************/
/**
*   Initializes all of the data structures associated with the DMA.
*
*   Resets global state, initializes the DMA hardware and every channel,
*   reserves dedicated channels per the device attribute table, registers
*   interrupt handlers for channels that use an ISR, and creates the
*   /proc/dma/channels and /proc/dma/devices entries.
*
*   @return
*       >= 0    - Initialization was successful.
*
*       -EBUSY  - Two devices claim the same dedicated channel.
*       -EINVAL - Device table entry is inconsistent or out of range.
*/
/****************************************************************************/
int dma_init(void)
{
	int rc = 0;
	int controllerIdx;
	int channelIdx;
	DMA_Device_t devIdx;
	DMA_Channel_t *channel;
	DMA_Handle_t dedicatedHandle;

	memset(&gDMA, 0, sizeof(gDMA));
	/* Initialized to 0 (held): the lock stays held throughout init and
	 * is released by the up() at 'out', making it available (count 1). */
	sema_init(&gDMA.lock, 0);
	init_waitqueue_head(&gDMA.freeChannelQ);

	/* Initialize the Hardware */
	dmacHw_initDma();

	/* Start off by marking all of the DMA channels as shared. */
	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
	     controllerIdx++) {
		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
		     channelIdx++) {
			channel =
			    &gDMA.controller[controllerIdx].channel[channelIdx];

			channel->flags = 0;
			channel->devType = DMA_DEVICE_NONE;
			channel->lastDevType = DMA_DEVICE_NONE;

#if (DMA_DEBUG_TRACK_RESERVATION)
			channel->fileName = "";
			channel->lineNum = 0;
#endif
			channel->dmacHwHandle =
			    dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID
						    (controllerIdx,
						     channelIdx));
			dmacHw_initChannel(channel->dmacHwHandle);
		}
	}

	/* Record any special attributes that channels may have */
	gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
	gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
	gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
	gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;

	/* Now walk through and record the dedicated channels. */
	for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
		DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];

		/* NO_ISR only makes sense on a dedicated channel. */
		if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0)
		    && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) {
			printk(KERN_ERR
			       "DMA Device: %s Can only request NO_ISR for dedicated devices\n",
			       devAttr->name);
			rc = -EINVAL;
			goto out;
		}

		if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
			/* This is a dedicated device. Mark the channel as being reserved. */
			if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) {
				printk(KERN_ERR
				       "DMA Device: %s DMA Controller %d is out of range\n",
				       devAttr->name,
				       devAttr->dedicatedController);
				rc = -EINVAL;
				goto out;
			}
			if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) {
				printk(KERN_ERR
				       "DMA Device: %s DMA Channel %d is out of range\n",
				       devAttr->name,
				       devAttr->dedicatedChannel);
				rc = -EINVAL;
				goto out;
			}

			dedicatedHandle =
			    MAKE_HANDLE(devAttr->dedicatedController,
					devAttr->dedicatedChannel);
			channel = HandleToChannel(dedicatedHandle);

			/* Two devices must not claim the same channel. */
			if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
			    0) {
				printk
				    ("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n",
				     devAttr->name,
				     devAttr->dedicatedController,
				     devAttr->dedicatedChannel,
				     DMA_gDeviceAttribute[channel->devType].
				     name);
				rc = -EBUSY;
				goto out;
			}

			channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED;
			channel->devType = devIdx;

			if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) {
				channel->flags |= DMA_CHANNEL_FLAG_NO_ISR;
			}

			/* For dedicated channels, we can go ahead and configure the DMA channel now */
			/* as well. */
			ConfigChannel(dedicatedHandle);
		}
	}

	/* Go through and register the interrupt handlers */
	for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
	     controllerIdx++) {
		for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
		     channelIdx++) {
			channel =
			    &gDMA.controller[controllerIdx].channel[channelIdx];

			if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) {
				snprintf(channel->name, sizeof(channel->name),
					 "dma %d:%d %s", controllerIdx,
					 channelIdx,
					 channel->devType ==
					 DMA_DEVICE_NONE ? "" :
					 DMA_gDeviceAttribute[channel->devType].
					 name);

				/* NOTE(review): a failure here is logged but
				 * rc may be overwritten by a later successful
				 * request_irq; verify partial IRQ registration
				 * is acceptable. */
				rc =
				    request_irq(IRQ_DMA0C0 +
						(controllerIdx *
						 DMA_NUM_CHANNELS) +
						channelIdx,
						dma_interrupt_handler,
						IRQF_DISABLED, channel->name,
						channel);
				if (rc != 0) {
					printk(KERN_ERR
					       "request_irq for IRQ_DMA%dC%d failed\n",
					       controllerIdx, channelIdx);
				}
			}
		}
	}

	/* Create /proc/dma/channels and /proc/dma/devices */
	gDmaDir = proc_mkdir("dma", NULL);
	if (gDmaDir == NULL) {
		printk(KERN_ERR "Unable to create /proc/dma\n");
	} else {
		create_proc_read_entry("channels", 0, gDmaDir,
				       dma_proc_read_channels, NULL);
		create_proc_read_entry("devices", 0, gDmaDir,
				       dma_proc_read_devices, NULL);
	}

out:
	/* Release the lock that sema_init() left held (count 0 -> 1). */
	up(&gDMA.lock);

	return rc;
}
  666. /****************************************************************************/
  667. /**
  668. * Reserves a channel for use with @a dev. If the device is setup to use
  669. * a shared channel, then this function will block until a free channel
  670. * becomes available.
  671. *
  672. * @return
  673. * >= 0 - A valid DMA Handle.
  674. * -EBUSY - Device is currently being used.
  675. * -ENODEV - Device handed in is invalid.
  676. */
  677. /****************************************************************************/
#if (DMA_DEBUG_TRACK_RESERVATION)
DMA_Handle_t dma_request_channel_dbg
    (DMA_Device_t dev, const char *fileName, int lineNum)
#else
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
#endif
{
	DMA_Handle_t handle;
	DMA_DeviceAttribute_t *devAttr;
	DMA_Channel_t *channel;
	int controllerIdx;
	int controllerIdx2;
	int channelIdx;

	/* All reservation state is protected by gDMA.lock. */
	if (down_interruptible(&gDMA.lock) < 0) {
		return -ERESTARTSYS;
	}

	/* Reject device ids outside the attribute table. */
	if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
		handle = -ENODEV;
		goto out;
	}
	devAttr = &DMA_gDeviceAttribute[dev];

#if (DMA_DEBUG_TRACK_RESERVATION)
	{
		/* Record only the basename of the caller's file for the
		 * /proc reservation-tracking output. */
		char *s;

		s = strrchr(fileName, '/');
		if (s != NULL) {
			fileName = s + 1;
		}
	}
#endif
	if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
		/* This device has already been requested and not been freed */

		printk(KERN_ERR "%s: device %s is already requested\n",
		       __func__, devAttr->name);
		handle = -EBUSY;
		goto out;
	}

	if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
		/* This device has a dedicated channel. No scanning or waiting
		 * is done; if the dedicated channel is busy we fail fast. */

		channel =
		    &gDMA.controller[devAttr->dedicatedController].
		    channel[devAttr->dedicatedChannel];
		if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
			handle = -EBUSY;
			goto out;
		}

		channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
		devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
		channel->fileName = fileName;
		channel->lineNum = lineNum;
#endif
		handle =
		    MAKE_HANDLE(devAttr->dedicatedController,
			        devAttr->dedicatedChannel);
		goto out;
	}

	/* This device needs to use one of the shared channels. Block (below)
	 * until one becomes free if none is available right now. */
	handle = DMA_INVALID_HANDLE;
	while (handle == DMA_INVALID_HANDLE) {
		/* Scan through the shared channels and see if one is available */

		for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
		     controllerIdx2++) {
			/* Check to see if we should try on controller 1 first.
			 * (1 - idx flips the visit order of the two controllers.) */
			controllerIdx = controllerIdx2;
			if ((devAttr->
			     flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
				controllerIdx = 1 - controllerIdx;
			}

			/* See if the device is available on the controller being tested */
			if ((devAttr->
			     flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
			    != 0) {
				for (channelIdx = 0;
				     channelIdx < DMA_NUM_CHANNELS;
				     channelIdx++) {
					channel =
					    &gDMA.controller[controllerIdx].
					    channel[channelIdx];

					/* A candidate must be neither dedicated
					 * nor currently reserved. */
					if (((channel->
					      flags &
					      DMA_CHANNEL_FLAG_IS_DEDICATED) ==
					     0)
					    &&
					    ((channel->
					      flags & DMA_CHANNEL_FLAG_IN_USE)
					     == 0)) {
						if (((channel->
						      flags &
						      DMA_CHANNEL_FLAG_LARGE_FIFO)
						     != 0)
						    &&
						    ((devAttr->
						      flags &
						      DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
						     == 0)) {
							/* This channel is a large fifo - don't tie it up */
							/* with devices that we don't want using it. */
							continue;
						}

						channel->flags |=
						    DMA_CHANNEL_FLAG_IN_USE;
						channel->devType = dev;
						devAttr->flags |=
						    DMA_DEVICE_FLAG_IN_USE;
#if (DMA_DEBUG_TRACK_RESERVATION)
						channel->fileName = fileName;
						channel->lineNum = lineNum;
#endif
						handle =
						    MAKE_HANDLE(controllerIdx,
								channelIdx);

						/* Now that we've reserved the channel - we can go ahead and configure it */

						if (ConfigChannel(handle) != 0) {
							/* NOTE: the channel stays marked IN_USE even
							 * when configuration fails; the caller gets
							 * -EIO and is expected not to use the handle. */
							handle = -EIO;
							printk(KERN_ERR
							       "dma_request_channel: ConfigChannel failed\n");
						}
						goto out;
					}
				}
			}
		}

		/* No channels are currently available. Let's wait for one to free up. */

		{
			DEFINE_WAIT(wait);

			/* Classic missed-wakeup-safe sleep: queue ourselves on
			 * freeChannelQ BEFORE dropping gDMA.lock, so a
			 * dma_free_channel() that runs in the window still
			 * wakes us. */
			prepare_to_wait(&gDMA.freeChannelQ, &wait,
					TASK_INTERRUPTIBLE);
			up(&gDMA.lock);
			schedule();
			finish_wait(&gDMA.freeChannelQ, &wait);

			if (signal_pending(current)) {
				/* We don't currently hold gDMA.lock, so we return directly */

				return -ERESTARTSYS;
			}
		}

		/* Re-take the lock before rescanning the channels. */
		if (down_interruptible(&gDMA.lock)) {
			return -ERESTARTSYS;
		}
	}

out:
	up(&gDMA.lock);

	return handle;
}
/* Create both _dbg and non _dbg functions for modules. */
/* When reservation tracking is on, dma_request_channel is normally a macro
 * (redirecting callers to the _dbg variant with __FILE__/__LINE__). Undef it
 * here so modules that link against the plain symbol still get a real
 * function, which simply forwards with this file's location. */

#if (DMA_DEBUG_TRACK_RESERVATION)
#undef dma_request_channel
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
{
	return dma_request_channel_dbg(dev, __FILE__, __LINE__);
}

EXPORT_SYMBOL(dma_request_channel_dbg);
#endif
EXPORT_SYMBOL(dma_request_channel);
  832. /****************************************************************************/
  833. /**
  834. * Frees a previously allocated DMA Handle.
  835. */
  836. /****************************************************************************/
  837. int dma_free_channel(DMA_Handle_t handle /* DMA handle. */
  838. ) {
  839. int rc = 0;
  840. DMA_Channel_t *channel;
  841. DMA_DeviceAttribute_t *devAttr;
  842. if (down_interruptible(&gDMA.lock) < 0) {
  843. return -ERESTARTSYS;
  844. }
  845. channel = HandleToChannel(handle);
  846. if (channel == NULL) {
  847. rc = -EINVAL;
  848. goto out;
  849. }
  850. devAttr = &DMA_gDeviceAttribute[channel->devType];
  851. if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) {
  852. channel->lastDevType = channel->devType;
  853. channel->devType = DMA_DEVICE_NONE;
  854. }
  855. channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE;
  856. devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE;
  857. out:
  858. up(&gDMA.lock);
  859. wake_up_interruptible(&gDMA.freeChannelQ);
  860. return rc;
  861. }
  862. EXPORT_SYMBOL(dma_free_channel);
  863. /****************************************************************************/
  864. /**
  865. * Determines if a given device has been configured as using a shared
  866. * channel.
  867. *
  868. * @return
  869. * 0 Device uses a dedicated channel
  870. * > zero Device uses a shared channel
  871. * < zero Error code
  872. */
  873. /****************************************************************************/
  874. int dma_device_is_channel_shared(DMA_Device_t device /* Device to check. */
  875. ) {
  876. DMA_DeviceAttribute_t *devAttr;
  877. if (!IsDeviceValid(device)) {
  878. return -ENODEV;
  879. }
  880. devAttr = &DMA_gDeviceAttribute[device];
  881. return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0);
  882. }
  883. EXPORT_SYMBOL(dma_device_is_channel_shared);
  884. /****************************************************************************/
  885. /**
  886. * Allocates buffers for the descriptors. This is normally done automatically
  887. * but needs to be done explicitly when initiating a dma from interrupt
  888. * context.
  889. *
  890. * @return
  891. * 0 Descriptors were allocated successfully
  892. * -EINVAL Invalid device type for this kind of transfer
  893. * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
  894. * -ENOMEM Memory exhausted
  895. */
  896. /****************************************************************************/
int dma_alloc_descriptors(DMA_Handle_t handle,	/* DMA Handle */
			  dmacHw_TRANSFER_TYPE_e transferType,	/* Type of transfer being performed */
			  dma_addr_t srcData,	/* Place to get data to write to device */
			  dma_addr_t dstData,	/* Pointer to device data address */
			  size_t numBytes	/* Number of bytes to transfer to the device */
    ) {
	DMA_Channel_t *channel;
	DMA_DeviceAttribute_t *devAttr;
	int numDescriptors;
	size_t ringBytesRequired;
	int rc = 0;

	channel = HandleToChannel(handle);
	if (channel == NULL) {
		return -ENODEV;
	}

	devAttr = &DMA_gDeviceAttribute[channel->devType];

	/* The device's configured direction must match the requested one. */
	if (devAttr->config.transferType != transferType) {
		return -EINVAL;
	}

	/* Figure out how many descriptors we need. */

	/* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
	/*        srcData, dstData, numBytes); */

	numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
							 (void *)srcData,
							 (void *)dstData,
							 numBytes);
	if (numDescriptors < 0) {
		printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n",
		       __func__);
		return -EINVAL;
	}

	/* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
	/* a new one. */

	ringBytesRequired = dmacHw_descriptorLen(numDescriptors);

	/* printk("ringBytesRequired: %d\n", ringBytesRequired); */

	if (ringBytesRequired > devAttr->ring.bytesAllocated) {
		/* Make sure that this code path is never taken from interrupt context. */
		/* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
		/* allocation needs to have already been done. */

		might_sleep();

		/* Free the old descriptor ring and allocate a new one. */

		dma_free_descriptor_ring(&devAttr->ring);

		/* And allocate a new one. */

		rc =
		     dma_alloc_descriptor_ring(&devAttr->ring,
					       numDescriptors);
		if (rc < 0) {
			printk(KERN_ERR
			       "%s: dma_alloc_descriptor_ring(%d) failed\n",
			       __func__, numDescriptors);
			return rc;
		}
		/* Setup the descriptor for this transfer */

		if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
					  devAttr->ring.physAddr,
					  devAttr->ring.bytesAllocated,
					  numDescriptors) < 0) {
			printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n",
			       __func__);
			return -EINVAL;
		}
	} else {
		/* We've already got enough ring buffer allocated. All we need to do is reset */
		/* any control information, just in case the previous DMA was stopped. */

		dmacHw_resetDescriptorControl(devAttr->ring.virtAddr);
	}

	/* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
	/* as last time, then we don't need to call setDataDescriptor again. */
	/* NOTE(review): this path calls setDataDescriptor unconditionally; the
	 * reuse check presumably lives inside dmacHw_setDataDescriptor or in the
	 * caller (see dma_transfer) -- confirm against dmacHw sources. */

	if (dmacHw_setDataDescriptor(&devAttr->config,
				     devAttr->ring.virtAddr,
				     (void *)srcData,
				     (void *)dstData, numBytes) < 0) {
		printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n",
		       __func__);
		return -EINVAL;
	}

	/* Remember the critical information for this transfer so that we can eliminate */
	/* another call to dma_alloc_descriptors if the caller reuses the same buffers */

	devAttr->prevSrcData = srcData;
	devAttr->prevDstData = dstData;
	devAttr->prevNumBytes = numBytes;

	return 0;
}

EXPORT_SYMBOL(dma_alloc_descriptors);
  981. /****************************************************************************/
  982. /**
  983. * Allocates and sets up descriptors for a double buffered circular buffer.
  984. *
  985. * This is primarily intended to be used for things like the ingress samples
  986. * from a microphone.
  987. *
  988. * @return
  989. * > 0 Number of descriptors actually allocated.
  990. * -EINVAL Invalid device type for this kind of transfer
  991. * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
  992. * -ENOMEM Memory exhausted
  993. */
  994. /****************************************************************************/
  995. int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
  996. dma_addr_t srcData, /* Physical address of source data */
  997. dma_addr_t dstData1, /* Physical address of first destination buffer */
  998. dma_addr_t dstData2, /* Physical address of second destination buffer */
  999. size_t numBytes /* Number of bytes in each destination buffer */
  1000. ) {
  1001. DMA_Channel_t *channel;
  1002. DMA_DeviceAttribute_t *devAttr;
  1003. int numDst1Descriptors;
  1004. int numDst2Descriptors;
  1005. int numDescriptors;
  1006. size_t ringBytesRequired;
  1007. int rc = 0;
  1008. channel = HandleToChannel(handle);
  1009. if (channel == NULL) {
  1010. return -ENODEV;
  1011. }
  1012. devAttr = &DMA_gDeviceAttribute[channel->devType];
  1013. /* Figure out how many descriptors we need. */
  1014. /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
  1015. /* srcData, dstData, numBytes); */
  1016. numDst1Descriptors =
  1017. dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
  1018. (void *)dstData1, numBytes);
  1019. if (numDst1Descriptors < 0) {
  1020. return -EINVAL;
  1021. }
  1022. numDst2Descriptors =
  1023. dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
  1024. (void *)dstData2, numBytes);
  1025. if (numDst2Descriptors < 0) {
  1026. return -EINVAL;
  1027. }
  1028. numDescriptors = numDst1Descriptors + numDst2Descriptors;
  1029. /* printk("numDescriptors: %d\n", numDescriptors); */
  1030. /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
  1031. /* a new one. */
  1032. ringBytesRequired = dmacHw_descriptorLen(numDescriptors);
  1033. /* printk("ringBytesRequired: %d\n", ringBytesRequired); */
  1034. if (ringBytesRequired > devAttr->ring.bytesAllocated) {
  1035. /* Make sure that this code path is never taken from interrupt context. */
  1036. /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
  1037. /* allocation needs to have already been done. */
  1038. might_sleep();
  1039. /* Free the old descriptor ring and allocate a new one. */
  1040. dma_free_descriptor_ring(&devAttr->ring);
  1041. /* And allocate a new one. */
  1042. rc =
  1043. dma_alloc_descriptor_ring(&devAttr->ring,
  1044. numDescriptors);
  1045. if (rc < 0) {
  1046. printk(KERN_ERR
  1047. "%s: dma_alloc_descriptor_ring(%d) failed\n",
  1048. __func__, ringBytesRequired);
  1049. return rc;
  1050. }
  1051. }
  1052. /* Setup the descriptor for this transfer. Since this function is used with */
  1053. /* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */
  1054. /* setDataDescriptor will keep trying to append onto the end. */
  1055. if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
  1056. devAttr->ring.physAddr,
  1057. devAttr->ring.bytesAllocated,
  1058. numDescriptors) < 0) {
  1059. printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__);
  1060. return -EINVAL;
  1061. }
  1062. /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
  1063. /* as last time, then we don't need to call setDataDescriptor again. */
  1064. if (dmacHw_setDataDescriptor(&devAttr->config,
  1065. devAttr->ring.virtAddr,
  1066. (void *)srcData,
  1067. (void *)dstData1, numBytes) < 0) {
  1068. printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n",
  1069. __func__);
  1070. return -EINVAL;
  1071. }
  1072. if (dmacHw_setDataDescriptor(&devAttr->config,
  1073. devAttr->ring.virtAddr,
  1074. (void *)srcData,
  1075. (void *)dstData2, numBytes) < 0) {
  1076. printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n",
  1077. __func__);
  1078. return -EINVAL;
  1079. }
  1080. /* You should use dma_start_transfer rather than dma_transfer_xxx so we don't */
  1081. /* try to make the 'prev' variables right. */
  1082. devAttr->prevSrcData = 0;
  1083. devAttr->prevDstData = 0;
  1084. devAttr->prevNumBytes = 0;
  1085. return numDescriptors;
  1086. }
  1087. EXPORT_SYMBOL(dma_alloc_double_dst_descriptors);
  1088. /****************************************************************************/
  1089. /**
  1090. * Initiates a transfer when the descriptors have already been setup.
  1091. *
  1092. * This is a special case, and normally, the dma_transfer_xxx functions should
  1093. * be used.
  1094. *
  1095. * @return
  1096. * 0 Transfer was started successfully
  1097. * -ENODEV Invalid handle
  1098. */
  1099. /****************************************************************************/
  1100. int dma_start_transfer(DMA_Handle_t handle)
  1101. {
  1102. DMA_Channel_t *channel;
  1103. DMA_DeviceAttribute_t *devAttr;
  1104. channel = HandleToChannel(handle);
  1105. if (channel == NULL) {
  1106. return -ENODEV;
  1107. }
  1108. devAttr = &DMA_gDeviceAttribute[channel->devType];
  1109. dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
  1110. devAttr->ring.virtAddr);
  1111. /* Since we got this far, everything went successfully */
  1112. return 0;
  1113. }
  1114. EXPORT_SYMBOL(dma_start_transfer);
  1115. /****************************************************************************/
  1116. /**
  1117. * Stops a previously started DMA transfer.
  1118. *
  1119. * @return
  1120. * 0 Transfer was stopped successfully
  1121. * -ENODEV Invalid handle
  1122. */
  1123. /****************************************************************************/
  1124. int dma_stop_transfer(DMA_Handle_t handle)
  1125. {
  1126. DMA_Channel_t *channel;
  1127. channel = HandleToChannel(handle);
  1128. if (channel == NULL) {
  1129. return -ENODEV;
  1130. }
  1131. dmacHw_stopTransfer(channel->dmacHwHandle);
  1132. return 0;
  1133. }
  1134. EXPORT_SYMBOL(dma_stop_transfer);
  1135. /****************************************************************************/
  1136. /**
  1137. * Waits for a DMA to complete by polling. This function is only intended
  1138. * to be used for testing. Interrupts should be used for most DMA operations.
  1139. */
  1140. /****************************************************************************/
  1141. int dma_wait_transfer_done(DMA_Handle_t handle)
  1142. {
  1143. DMA_Channel_t *channel;
  1144. dmacHw_TRANSFER_STATUS_e status;
  1145. channel = HandleToChannel(handle);
  1146. if (channel == NULL) {
  1147. return -ENODEV;
  1148. }
  1149. while ((status =
  1150. dmacHw_transferCompleted(channel->dmacHwHandle)) ==
  1151. dmacHw_TRANSFER_STATUS_BUSY) {
  1152. ;
  1153. }
  1154. if (status == dmacHw_TRANSFER_STATUS_ERROR) {
  1155. printk(KERN_ERR "%s: DMA transfer failed\n", __func__);
  1156. return -EIO;
  1157. }
  1158. return 0;
  1159. }
  1160. EXPORT_SYMBOL(dma_wait_transfer_done);
  1161. /****************************************************************************/
  1162. /**
  1163. * Initiates a DMA, allocating the descriptors as required.
  1164. *
  1165. * @return
  1166. * 0 Transfer was started successfully
  1167. * -EINVAL Invalid device type for this kind of transfer
  1168. * (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV)
  1169. */
  1170. /****************************************************************************/
  1171. int dma_transfer(DMA_Handle_t handle, /* DMA Handle */
  1172. dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */
  1173. dma_addr_t srcData, /* Place to get data to write to device */
  1174. dma_addr_t dstData, /* Pointer to device data address */
  1175. size_t numBytes /* Number of bytes to transfer to the device */
  1176. ) {
  1177. DMA_Channel_t *channel;
  1178. DMA_DeviceAttribute_t *devAttr;
  1179. int rc = 0;
  1180. channel = HandleToChannel(handle);
  1181. if (channel == NULL) {
  1182. return -ENODEV;
  1183. }
  1184. devAttr = &DMA_gDeviceAttribute[channel->devType];
  1185. if (devAttr->config.transferType != transferType) {
  1186. return -EINVAL;
  1187. }
  1188. /* We keep track of the information about the previous request for this */
  1189. /* device, and if the attributes match, then we can use the descriptors we setup */
  1190. /* the last time, and not have to reinitialize everything. */
  1191. {
  1192. rc =
  1193. dma_alloc_descriptors(handle, transferType, srcData,
  1194. dstData, numBytes);
  1195. if (rc != 0) {
  1196. return rc;
  1197. }
  1198. }
  1199. /* And kick off the transfer */
  1200. devAttr->numBytes = numBytes;
  1201. devAttr->transferStartTime = timer_get_tick_count();
  1202. dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
  1203. devAttr->ring.virtAddr);
  1204. /* Since we got this far, everything went successfully */
  1205. return 0;
  1206. }
  1207. EXPORT_SYMBOL(dma_transfer);
  1208. /****************************************************************************/
  1209. /**
  1210. * Set the callback function which will be called when a transfer completes.
  1211. * If a NULL callback function is set, then no callback will occur.
  1212. *
  1213. * @note @a devHandler will be called from IRQ context.
  1214. *
  1215. * @return
  1216. * 0 - Success
  1217. * -ENODEV - Device handed in is invalid.
  1218. */
  1219. /****************************************************************************/
  1220. int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. */
  1221. DMA_DeviceHandler_t devHandler, /* Function to call when the DMA completes */
  1222. void *userData /* Pointer which will be passed to devHandler. */
  1223. ) {
  1224. DMA_DeviceAttribute_t *devAttr;
  1225. unsigned long flags;
  1226. if (!IsDeviceValid(dev)) {
  1227. return -ENODEV;
  1228. }
  1229. devAttr = &DMA_gDeviceAttribute[dev];
  1230. local_irq_save(flags);
  1231. devAttr->userData = userData;
  1232. devAttr->devHandler = devHandler;
  1233. local_irq_restore(flags);
  1234. return 0;
  1235. }
  1236. EXPORT_SYMBOL(dma_set_device_handler);