ioat_dma.c
  1. /*
  2. * Intel I/OAT DMA Linux driver
  3. * Copyright(c) 2004 - 2009 Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. * The full GNU General Public License is included in this distribution in
  19. * the file called "COPYING".
  20. *
  21. */
  22. /*
  23. * This driver supports an Intel I/OAT DMA engine, which does asynchronous
  24. * copy operations.
  25. */
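/*
 * Rough usage sketch (illustrative only, not part of this driver): a
 * dmaengine client drives the engine through the operations registered
 * in ioat_dma_probe() below.  Buffer names and the length are
 * hypothetical.
 *
 *	struct dma_device *dma = chan->device;
 *	dma_addr_t src = dma_map_single(dma->dev, src_buf, len, DMA_TO_DEVICE);
 *	dma_addr_t dst = dma_map_single(dma->dev, dst_buf, len, DMA_FROM_DEVICE);
 *	struct dma_async_tx_descriptor *tx =
 *		dma->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *	dma->device_issue_pending(chan);
 *	while (dma->device_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
 *		cpu_relax();
 */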
  26. #include <linux/init.h>
  27. #include <linux/module.h>
  28. #include <linux/pci.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/dmaengine.h>
  31. #include <linux/delay.h>
  32. #include <linux/dma-mapping.h>
  33. #include <linux/workqueue.h>
  34. #include <linux/i7300_idle.h>
  35. #include "ioatdma.h"
  36. #include "ioatdma_registers.h"
  37. #include "ioatdma_hw.h"
  38. #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
  39. #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
  40. #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
  41. #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
  42. #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
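/*
 * Channel register banks are laid out at 0x80-byte strides behind the
 * device-global registers (see ioat_dma_enumerate_channels(), which maps
 * channel i at reg_base + 0x80 * (i + 1)), so chan_num() above recovers a
 * 1-based channel number for log messages.
 */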
  43. static int ioat_pending_level = 4;
  44. module_param(ioat_pending_level, int, 0644);
  45. MODULE_PARM_DESC(ioat_pending_level,
  46. "high-water mark for pushing ioat descriptors (default: 4)");
  47. #define RESET_DELAY msecs_to_jiffies(100)
  48. #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
  49. static void ioat_dma_chan_reset_part2(struct work_struct *work);
  50. static void ioat_dma_chan_watchdog(struct work_struct *work);
  51. /*
  52. * workaround for IOAT ver.3.0 null descriptor issue
  53. * (channel returns error when size is 0)
  54. */
  55. #define NULL_DESC_BUFFER_SIZE 1
  56. /* internal functions */
  57. static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
  58. static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
  59. static struct ioat_desc_sw *
  60. ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
  61. static struct ioat_desc_sw *
  62. ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
  63. static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
  64. struct ioatdma_device *device,
  65. int index)
  66. {
  67. return device->idx[index];
  68. }
  69. /**
  70. * ioat_dma_do_interrupt - handler used for single vector interrupt mode
  71. * @irq: interrupt id
  72. * @data: interrupt data
  73. */
  74. static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
  75. {
  76. struct ioatdma_device *instance = data;
  77. struct ioat_dma_chan *ioat_chan;
  78. unsigned long attnstatus;
  79. int bit;
  80. u8 intrctrl;
  81. intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
  82. if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
  83. return IRQ_NONE;
  84. if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
  85. writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
  86. return IRQ_NONE;
  87. }
  88. attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
  89. for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
  90. ioat_chan = ioat_lookup_chan_by_index(instance, bit);
  91. tasklet_schedule(&ioat_chan->cleanup_task);
  92. }
  93. writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
  94. return IRQ_HANDLED;
  95. }
  96. /**
  97. * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
  98. * @irq: interrupt id
  99. * @data: interrupt data
  100. */
  101. static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
  102. {
  103. struct ioat_dma_chan *ioat_chan = data;
  104. tasklet_schedule(&ioat_chan->cleanup_task);
  105. return IRQ_HANDLED;
  106. }
  107. static void ioat_dma_cleanup_tasklet(unsigned long data);
  108. /**
  109. * ioat_dma_enumerate_channels - find and initialize the device's channels
  110. * @device: the device to be enumerated
  111. */
  112. static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
  113. {
  114. u8 xfercap_scale;
  115. u32 xfercap;
  116. int i;
  117. struct ioat_dma_chan *ioat_chan;
  118. /*
  119. * IOAT ver.3 workarounds
  120. */
  121. if (device->version == IOAT_VER_3_0) {
  122. u32 chan_err_mask;
  123. u16 dev_id;
  124. u32 dmauncerrsts;
  125. /*
  126. * Write CHANERRMSK_INT with 3E07h to mask out the errors
  127. * that can cause stability issues for IOAT ver.3
  128. */
  129. chan_err_mask = 0x3E07;
  130. pci_write_config_dword(device->pdev,
  131. IOAT_PCI_CHANERRMASK_INT_OFFSET,
  132. chan_err_mask);
  133. /*
  134. * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
  135. * (workaround for spurious config parity error after restart)
  136. */
  137. pci_read_config_word(device->pdev,
  138. IOAT_PCI_DEVICE_ID_OFFSET,
  139. &dev_id);
  140. if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
  141. dmauncerrsts = 0x10;
  142. pci_write_config_dword(device->pdev,
  143. IOAT_PCI_DMAUNCERRSTS_OFFSET,
  144. dmauncerrsts);
  145. }
  146. }
  147. device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
  148. xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
  149. xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
  150. #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
  151. if (i7300_idle_platform_probe(NULL, NULL) == 0) {
  152. device->common.chancnt--;
  153. }
  154. #endif
  155. for (i = 0; i < device->common.chancnt; i++) {
  156. ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
  157. if (!ioat_chan) {
  158. device->common.chancnt = i;
  159. break;
  160. }
  161. ioat_chan->device = device;
  162. ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
  163. ioat_chan->xfercap = xfercap;
  164. ioat_chan->desccount = 0;
  165. INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
  166. if (ioat_chan->device->version == IOAT_VER_2_0)
  167. writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
  168. IOAT_DMA_DCA_ANY_CPU,
  169. ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
  170. else if (ioat_chan->device->version == IOAT_VER_3_0)
  171. writel(IOAT_DMA_DCA_ANY_CPU,
  172. ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
  173. spin_lock_init(&ioat_chan->cleanup_lock);
  174. spin_lock_init(&ioat_chan->desc_lock);
  175. INIT_LIST_HEAD(&ioat_chan->free_desc);
  176. INIT_LIST_HEAD(&ioat_chan->used_desc);
  177. /* This should be made common somewhere in dmaengine.c */
  178. ioat_chan->common.device = &device->common;
  179. list_add_tail(&ioat_chan->common.device_node,
  180. &device->common.channels);
  181. device->idx[i] = ioat_chan;
  182. tasklet_init(&ioat_chan->cleanup_task,
  183. ioat_dma_cleanup_tasklet,
  184. (unsigned long) ioat_chan);
  185. tasklet_disable(&ioat_chan->cleanup_task);
  186. }
  187. return device->common.chancnt;
  188. }
  189. /**
  190. * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  191. * descriptors to hw
  192. * @chan: DMA channel handle
  193. */
  194. static inline void __ioat1_dma_memcpy_issue_pending(
  195. struct ioat_dma_chan *ioat_chan)
  196. {
  197. ioat_chan->pending = 0;
  198. writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
  199. }
  200. static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
  201. {
  202. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  203. if (ioat_chan->pending > 0) {
  204. spin_lock_bh(&ioat_chan->desc_lock);
  205. __ioat1_dma_memcpy_issue_pending(ioat_chan);
  206. spin_unlock_bh(&ioat_chan->desc_lock);
  207. }
  208. }
  209. static inline void __ioat2_dma_memcpy_issue_pending(
  210. struct ioat_dma_chan *ioat_chan)
  211. {
  212. ioat_chan->pending = 0;
  213. writew(ioat_chan->dmacount,
  214. ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
  215. }
  216. static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
  217. {
  218. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  219. if (ioat_chan->pending > 0) {
  220. spin_lock_bh(&ioat_chan->desc_lock);
  221. __ioat2_dma_memcpy_issue_pending(ioat_chan);
  222. spin_unlock_bh(&ioat_chan->desc_lock);
  223. }
  224. }
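/*
 * The two issue_pending flavours above reflect the hardware generations:
 * CB1 (IOAT v1.2) is kicked with an APPEND channel command, while the
 * CB2/CB3 ring hardware is told how far it may process by writing the
 * running descriptor count to the per-channel DMACOUNT register.
 */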
  225. /**
  226. * ioat_dma_chan_reset_part2 - reinit the channel after a reset
  227. */
  228. static void ioat_dma_chan_reset_part2(struct work_struct *work)
  229. {
  230. struct ioat_dma_chan *ioat_chan =
  231. container_of(work, struct ioat_dma_chan, work.work);
  232. struct ioat_desc_sw *desc;
  233. spin_lock_bh(&ioat_chan->cleanup_lock);
  234. spin_lock_bh(&ioat_chan->desc_lock);
  235. ioat_chan->completion_virt->low = 0;
  236. ioat_chan->completion_virt->high = 0;
  237. ioat_chan->pending = 0;
  238. /*
  239. * count the descriptors waiting, and be sure to do it
  240. * right for both the CB1 line and the CB2 ring
  241. */
  242. ioat_chan->dmacount = 0;
  243. if (ioat_chan->used_desc.prev) {
  244. desc = to_ioat_desc(ioat_chan->used_desc.prev);
  245. do {
  246. ioat_chan->dmacount++;
  247. desc = to_ioat_desc(desc->node.next);
  248. } while (&desc->node != ioat_chan->used_desc.next);
  249. }
  250. /*
  251. * write the new starting descriptor address
  252. * this puts channel engine into ARMED state
  253. */
  254. desc = to_ioat_desc(ioat_chan->used_desc.prev);
  255. switch (ioat_chan->device->version) {
  256. case IOAT_VER_1_2:
  257. writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
  258. ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
  259. writel(((u64) desc->async_tx.phys) >> 32,
  260. ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
  261. writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
  262. + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
  263. break;
  264. case IOAT_VER_2_0:
  265. writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
  266. ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
  267. writel(((u64) desc->async_tx.phys) >> 32,
  268. ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
  269. /* tell the engine to go with what's left to be done */
  270. writew(ioat_chan->dmacount,
  271. ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
  272. break;
  273. }
  274. dev_err(&ioat_chan->device->pdev->dev,
  275. "chan%d reset - %d descs waiting, %d total desc\n",
  276. chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
  277. spin_unlock_bh(&ioat_chan->desc_lock);
  278. spin_unlock_bh(&ioat_chan->cleanup_lock);
  279. }
  280. /**
  281. * ioat_dma_reset_channel - restart a channel
  282. * @ioat_chan: IOAT DMA channel handle
  283. */
  284. static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
  285. {
  286. u32 chansts, chanerr;
  287. if (!ioat_chan->used_desc.prev)
  288. return;
  289. chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
  290. chansts = (ioat_chan->completion_virt->low
  291. & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
  292. if (chanerr) {
  293. dev_err(&ioat_chan->device->pdev->dev,
  294. "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
  295. chan_num(ioat_chan), chansts, chanerr);
  296. writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
  297. }
  298. /*
  299. * whack it upside the head with a reset
  300. * and wait for things to settle out.
  301. * force the pending count to a really big negative
  302. * to make sure no one forces an issue_pending
  303. * while we're waiting.
  304. */
  305. spin_lock_bh(&ioat_chan->desc_lock);
  306. ioat_chan->pending = INT_MIN;
  307. writeb(IOAT_CHANCMD_RESET,
  308. ioat_chan->reg_base
  309. + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
  310. spin_unlock_bh(&ioat_chan->desc_lock);
  311. /* schedule the 2nd half instead of sleeping a long time */
  312. schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
  313. }
  314. /**
  315. * ioat_dma_chan_watchdog - watch for stuck channels
  316. */
  317. static void ioat_dma_chan_watchdog(struct work_struct *work)
  318. {
  319. struct ioatdma_device *device =
  320. container_of(work, struct ioatdma_device, work.work);
  321. struct ioat_dma_chan *ioat_chan;
  322. int i;
  323. union {
  324. u64 full;
  325. struct {
  326. u32 low;
  327. u32 high;
  328. };
  329. } completion_hw;
  330. unsigned long compl_desc_addr_hw;
  331. for (i = 0; i < device->common.chancnt; i++) {
  332. ioat_chan = ioat_lookup_chan_by_index(device, i);
  333. if (ioat_chan->device->version == IOAT_VER_1_2
  334. /* have we started processing anything yet */
  335. && ioat_chan->last_completion
  336. /* have we completed any since last watchdog cycle? */
  337. && (ioat_chan->last_completion ==
  338. ioat_chan->watchdog_completion)
  339. /* has TCP stuck on one cookie since last watchdog? */
  340. && (ioat_chan->watchdog_tcp_cookie ==
  341. ioat_chan->watchdog_last_tcp_cookie)
  342. && (ioat_chan->watchdog_tcp_cookie !=
  343. ioat_chan->completed_cookie)
  344. /* is there something in the chain to be processed? */
  345. /* CB1 chain always has at least the last one processed */
  346. && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
  347. && ioat_chan->pending == 0) {
  348. /*
  349. * check CHANSTS register for completed
  350. * descriptor address.
  351. * if it is different than completion writeback,
  352. * it is not zero
  353. * and it has changed since the last watchdog
  354. * we can assume that channel
  355. * is still working correctly
  356. * and the problem is in completion writeback.
  357. * update completion writeback
  358. * with actual CHANSTS value
  359. * else
  360. * try resetting the channel
  361. */
  362. completion_hw.low = readl(ioat_chan->reg_base +
  363. IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
  364. completion_hw.high = readl(ioat_chan->reg_base +
  365. IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
  366. #if (BITS_PER_LONG == 64)
  367. compl_desc_addr_hw =
  368. completion_hw.full
  369. & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
  370. #else
  371. compl_desc_addr_hw =
  372. completion_hw.low & IOAT_LOW_COMPLETION_MASK;
  373. #endif
  374. if ((compl_desc_addr_hw != 0)
  375. && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
  376. && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
  377. ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
  378. ioat_chan->completion_virt->low = completion_hw.low;
  379. ioat_chan->completion_virt->high = completion_hw.high;
  380. } else {
  381. ioat_dma_reset_channel(ioat_chan);
  382. ioat_chan->watchdog_completion = 0;
  383. ioat_chan->last_compl_desc_addr_hw = 0;
  384. }
  385. /*
  386. * for version 2.0 if there are descriptors yet to be processed
  387. * and the last completed hasn't changed since the last watchdog
  388. * if they haven't hit the pending level
  389. * issue the pending to push them through
  390. * else
  391. * try resetting the channel
  392. */
  393. } else if (ioat_chan->device->version == IOAT_VER_2_0
  394. && ioat_chan->used_desc.prev
  395. && ioat_chan->last_completion
  396. && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
  397. if (ioat_chan->pending < ioat_pending_level)
  398. ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
  399. else {
  400. ioat_dma_reset_channel(ioat_chan);
  401. ioat_chan->watchdog_completion = 0;
  402. }
  403. } else {
  404. ioat_chan->last_compl_desc_addr_hw = 0;
  405. ioat_chan->watchdog_completion
  406. = ioat_chan->last_completion;
  407. }
  408. ioat_chan->watchdog_last_tcp_cookie =
  409. ioat_chan->watchdog_tcp_cookie;
  410. }
  411. schedule_delayed_work(&device->work, WATCHDOG_DELAY);
  412. }
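/**
 * ioat1_tx_submit - chain a prepared descriptor onto the channel and assign a cookie
 * @tx: descriptor prepared by ioat1_dma_prep_memcpy()
 *
 * A request longer than the channel's xfercap is split across several
 * hardware descriptors.  As a hypothetical example, with a 2MB xfercap a
 * 5MB copy becomes three chained descriptors (2MB + 2MB + 1MB); only the
 * last one carries the cookie, the completion callback and the unmap
 * information used later by ioat_dma_memcpy_cleanup().
 */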
  413. static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
  414. {
  415. struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
  416. struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
  417. struct ioat_desc_sw *prev, *new;
  418. struct ioat_dma_descriptor *hw;
  419. dma_cookie_t cookie;
  420. LIST_HEAD(new_chain);
  421. u32 copy;
  422. size_t len;
  423. dma_addr_t src, dst;
  424. unsigned long orig_flags;
  425. unsigned int desc_count = 0;
  426. /* src and dest and len are stored in the initial descriptor */
  427. len = first->len;
  428. src = first->src;
  429. dst = first->dst;
  430. orig_flags = first->async_tx.flags;
  431. new = first;
  432. spin_lock_bh(&ioat_chan->desc_lock);
  433. prev = to_ioat_desc(ioat_chan->used_desc.prev);
  434. prefetch(prev->hw);
  435. do {
  436. copy = min_t(size_t, len, ioat_chan->xfercap);
  437. async_tx_ack(&new->async_tx);
  438. hw = new->hw;
  439. hw->size = copy;
  440. hw->ctl = 0;
  441. hw->src_addr = src;
  442. hw->dst_addr = dst;
  443. hw->next = 0;
  444. /* chain together the physical address list for the HW */
  445. wmb();
  446. prev->hw->next = (u64) new->async_tx.phys;
  447. len -= copy;
  448. dst += copy;
  449. src += copy;
  450. list_add_tail(&new->node, &new_chain);
  451. desc_count++;
  452. prev = new;
  453. } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
  454. if (!new) {
  455. dev_err(&ioat_chan->device->pdev->dev,
  456. "tx submit failed\n");
  457. spin_unlock_bh(&ioat_chan->desc_lock);
  458. return -ENOMEM;
  459. }
  460. hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
  461. if (first->async_tx.callback) {
  462. hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
  463. if (first != new) {
  464. /* move callback into the last desc */
  465. new->async_tx.callback = first->async_tx.callback;
  466. new->async_tx.callback_param
  467. = first->async_tx.callback_param;
  468. first->async_tx.callback = NULL;
  469. first->async_tx.callback_param = NULL;
  470. }
  471. }
  472. new->tx_cnt = desc_count;
  473. new->async_tx.flags = orig_flags; /* client is in control of this ack */
  474. /* store the original values for use in later cleanup */
  475. if (new != first) {
  476. new->src = first->src;
  477. new->dst = first->dst;
  478. new->len = first->len;
  479. }
  480. /* cookie incr and addition to used_list must be atomic */
  481. cookie = ioat_chan->common.cookie;
  482. cookie++;
  483. if (cookie < 0)
  484. cookie = 1;
  485. ioat_chan->common.cookie = new->async_tx.cookie = cookie;
  486. /* write address into NextDescriptor field of last desc in chain */
  487. to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
  488. first->async_tx.phys;
  489. list_splice_tail(&new_chain, &ioat_chan->used_desc);
  490. ioat_chan->dmacount += desc_count;
  491. ioat_chan->pending += desc_count;
  492. if (ioat_chan->pending >= ioat_pending_level)
  493. __ioat1_dma_memcpy_issue_pending(ioat_chan);
  494. spin_unlock_bh(&ioat_chan->desc_lock);
  495. return cookie;
  496. }
  497. static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
  498. {
  499. struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
  500. struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
  501. struct ioat_desc_sw *new;
  502. struct ioat_dma_descriptor *hw;
  503. dma_cookie_t cookie;
  504. u32 copy;
  505. size_t len;
  506. dma_addr_t src, dst;
  507. unsigned long orig_flags;
  508. unsigned int desc_count = 0;
  509. /* src and dest and len are stored in the initial descriptor */
  510. len = first->len;
  511. src = first->src;
  512. dst = first->dst;
  513. orig_flags = first->async_tx.flags;
  514. new = first;
  515. /*
  516. * ioat_chan->desc_lock is still in force in version 2 path
  517. * it gets unlocked at end of this function
  518. */
  519. do {
  520. copy = min_t(size_t, len, ioat_chan->xfercap);
  521. async_tx_ack(&new->async_tx);
  522. hw = new->hw;
  523. hw->size = copy;
  524. hw->ctl = 0;
  525. hw->src_addr = src;
  526. hw->dst_addr = dst;
  527. len -= copy;
  528. dst += copy;
  529. src += copy;
  530. desc_count++;
  531. } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
  532. if (!new) {
  533. dev_err(&ioat_chan->device->pdev->dev,
  534. "tx submit failed\n");
  535. spin_unlock_bh(&ioat_chan->desc_lock);
  536. return -ENOMEM;
  537. }
  538. hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
  539. if (first->async_tx.callback) {
  540. hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
  541. if (first != new) {
  542. /* move callback into the last desc */
  543. new->async_tx.callback = first->async_tx.callback;
  544. new->async_tx.callback_param
  545. = first->async_tx.callback_param;
  546. first->async_tx.callback = NULL;
  547. first->async_tx.callback_param = NULL;
  548. }
  549. }
  550. new->tx_cnt = desc_count;
  551. new->async_tx.flags = orig_flags; /* client is in control of this ack */
  552. /* store the original values for use in later cleanup */
  553. if (new != first) {
  554. new->src = first->src;
  555. new->dst = first->dst;
  556. new->len = first->len;
  557. }
  558. /* cookie incr and addition to used_list must be atomic */
  559. cookie = ioat_chan->common.cookie;
  560. cookie++;
  561. if (cookie < 0)
  562. cookie = 1;
  563. ioat_chan->common.cookie = new->async_tx.cookie = cookie;
  564. ioat_chan->dmacount += desc_count;
  565. ioat_chan->pending += desc_count;
  566. if (ioat_chan->pending >= ioat_pending_level)
  567. __ioat2_dma_memcpy_issue_pending(ioat_chan);
  568. spin_unlock_bh(&ioat_chan->desc_lock);
  569. return cookie;
  570. }
  571. /**
  572. * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
  573. * @ioat_chan: the channel supplying the memory pool for the descriptors
  574. * @flags: allocation flags
  575. */
  576. static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
  577. struct ioat_dma_chan *ioat_chan,
  578. gfp_t flags)
  579. {
  580. struct ioat_dma_descriptor *desc;
  581. struct ioat_desc_sw *desc_sw;
  582. struct ioatdma_device *ioatdma_device;
  583. dma_addr_t phys;
  584. ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
  585. desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
  586. if (unlikely(!desc))
  587. return NULL;
  588. desc_sw = kzalloc(sizeof(*desc_sw), flags);
  589. if (unlikely(!desc_sw)) {
  590. pci_pool_free(ioatdma_device->dma_pool, desc, phys);
  591. return NULL;
  592. }
  593. memset(desc, 0, sizeof(*desc));
  594. dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
  595. switch (ioat_chan->device->version) {
  596. case IOAT_VER_1_2:
  597. desc_sw->async_tx.tx_submit = ioat1_tx_submit;
  598. break;
  599. case IOAT_VER_2_0:
  600. case IOAT_VER_3_0:
  601. desc_sw->async_tx.tx_submit = ioat2_tx_submit;
  602. break;
  603. }
  604. desc_sw->hw = desc;
  605. desc_sw->async_tx.phys = phys;
  606. return desc_sw;
  607. }
  608. static int ioat_initial_desc_count = 256;
  609. module_param(ioat_initial_desc_count, int, 0644);
  610. MODULE_PARM_DESC(ioat_initial_desc_count,
  611. "initial descriptors per channel (default: 256)");
  612. /**
  613. * ioat2_dma_massage_chan_desc - link the descriptors into a circle
  614. * @ioat_chan: the channel to be massaged
  615. */
  616. static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  617. {
  618. struct ioat_desc_sw *desc, *_desc;
  619. /* setup used_desc */
  620. ioat_chan->used_desc.next = ioat_chan->free_desc.next;
  621. ioat_chan->used_desc.prev = NULL;
  622. /* pull free_desc out of the circle so that every node is a hw
  623. * descriptor, but leave it pointing to the list
  624. */
  625. ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
  626. ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
  627. /* circle link the hw descriptors */
  628. desc = to_ioat_desc(ioat_chan->free_desc.next);
  629. desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
  630. list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
  631. desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
  632. }
  633. }
  634. /**
  635. * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  636. * @chan: the channel to be filled out
  637. */
  638. static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
  639. {
  640. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  641. struct ioat_desc_sw *desc;
  642. u16 chanctrl;
  643. u32 chanerr;
  644. int i;
  645. LIST_HEAD(tmp_list);
  646. /* have we already been set up? */
  647. if (!list_empty(&ioat_chan->free_desc))
  648. return ioat_chan->desccount;
  649. /* Setup register to interrupt and write completion status on error */
  650. chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
  651. IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
  652. IOAT_CHANCTRL_ERR_COMPLETION_EN;
  653. writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
  654. chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
  655. if (chanerr) {
  656. dev_err(&ioat_chan->device->pdev->dev,
  657. "CHANERR = %x, clearing\n", chanerr);
  658. writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
  659. }
  660. /* Allocate descriptors */
  661. for (i = 0; i < ioat_initial_desc_count; i++) {
  662. desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
  663. if (!desc) {
  664. dev_err(&ioat_chan->device->pdev->dev,
  665. "Only %d initial descriptors\n", i);
  666. break;
  667. }
  668. list_add_tail(&desc->node, &tmp_list);
  669. }
  670. spin_lock_bh(&ioat_chan->desc_lock);
  671. ioat_chan->desccount = i;
  672. list_splice(&tmp_list, &ioat_chan->free_desc);
  673. if (ioat_chan->device->version != IOAT_VER_1_2)
  674. ioat2_dma_massage_chan_desc(ioat_chan);
  675. spin_unlock_bh(&ioat_chan->desc_lock);
  676. /* allocate a completion writeback area */
  677. /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
  678. ioat_chan->completion_virt =
  679. pci_pool_alloc(ioat_chan->device->completion_pool,
  680. GFP_KERNEL,
  681. &ioat_chan->completion_addr);
  682. memset(ioat_chan->completion_virt, 0,
  683. sizeof(*ioat_chan->completion_virt));
  684. writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
  685. ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
  686. writel(((u64) ioat_chan->completion_addr) >> 32,
  687. ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
  688. tasklet_enable(&ioat_chan->cleanup_task);
  689. ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
  690. return ioat_chan->desccount;
  691. }
  692. /**
  693. * ioat_dma_free_chan_resources - release all the descriptors
  694. * @chan: the channel to be cleaned
  695. */
  696. static void ioat_dma_free_chan_resources(struct dma_chan *chan)
  697. {
  698. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  699. struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
  700. struct ioat_desc_sw *desc, *_desc;
  701. int in_use_descs = 0;
  702. /* Before freeing channel resources first check
  703. * if they have been previously allocated for this channel.
  704. */
  705. if (ioat_chan->desccount == 0)
  706. return;
  707. tasklet_disable(&ioat_chan->cleanup_task);
  708. ioat_dma_memcpy_cleanup(ioat_chan);
  709. /* Delay 100ms after reset to allow internal DMA logic to quiesce
  710. * before removing DMA descriptor resources.
  711. */
  712. writeb(IOAT_CHANCMD_RESET,
  713. ioat_chan->reg_base
  714. + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
  715. mdelay(100);
  716. spin_lock_bh(&ioat_chan->desc_lock);
  717. switch (ioat_chan->device->version) {
  718. case IOAT_VER_1_2:
  719. list_for_each_entry_safe(desc, _desc,
  720. &ioat_chan->used_desc, node) {
  721. in_use_descs++;
  722. list_del(&desc->node);
  723. pci_pool_free(ioatdma_device->dma_pool, desc->hw,
  724. desc->async_tx.phys);
  725. kfree(desc);
  726. }
  727. list_for_each_entry_safe(desc, _desc,
  728. &ioat_chan->free_desc, node) {
  729. list_del(&desc->node);
  730. pci_pool_free(ioatdma_device->dma_pool, desc->hw,
  731. desc->async_tx.phys);
  732. kfree(desc);
  733. }
  734. break;
  735. case IOAT_VER_2_0:
  736. case IOAT_VER_3_0:
  737. list_for_each_entry_safe(desc, _desc,
  738. ioat_chan->free_desc.next, node) {
  739. list_del(&desc->node);
  740. pci_pool_free(ioatdma_device->dma_pool, desc->hw,
  741. desc->async_tx.phys);
  742. kfree(desc);
  743. }
  744. desc = to_ioat_desc(ioat_chan->free_desc.next);
  745. pci_pool_free(ioatdma_device->dma_pool, desc->hw,
  746. desc->async_tx.phys);
  747. kfree(desc);
  748. INIT_LIST_HEAD(&ioat_chan->free_desc);
  749. INIT_LIST_HEAD(&ioat_chan->used_desc);
  750. break;
  751. }
  752. spin_unlock_bh(&ioat_chan->desc_lock);
  753. pci_pool_free(ioatdma_device->completion_pool,
  754. ioat_chan->completion_virt,
  755. ioat_chan->completion_addr);
  756. /* one is ok since we left it on there on purpose */
  757. if (in_use_descs > 1)
  758. dev_err(&ioat_chan->device->pdev->dev,
  759. "Freeing %d in use descriptors!\n",
  760. in_use_descs - 1);
  761. ioat_chan->last_completion = ioat_chan->completion_addr = 0;
  762. ioat_chan->pending = 0;
  763. ioat_chan->dmacount = 0;
  764. ioat_chan->desccount = 0;
  765. ioat_chan->watchdog_completion = 0;
  766. ioat_chan->last_compl_desc_addr_hw = 0;
  767. ioat_chan->watchdog_tcp_cookie =
  768. ioat_chan->watchdog_last_tcp_cookie = 0;
  769. }
  770. /**
  771. * ioat_dma_get_next_descriptor - return the next available descriptor
  772. * @ioat_chan: IOAT DMA channel handle
  773. *
  774. * Gets the next descriptor from the chain, and must be called with the
  775. * channel's desc_lock held. Allocates more descriptors if the channel
  776. * has run out.
  777. */
  778. static struct ioat_desc_sw *
  779. ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
  780. {
  781. struct ioat_desc_sw *new;
  782. if (!list_empty(&ioat_chan->free_desc)) {
  783. new = to_ioat_desc(ioat_chan->free_desc.next);
  784. list_del(&new->node);
  785. } else {
  786. /* try to get another desc */
  787. new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
  788. if (!new) {
  789. dev_err(&ioat_chan->device->pdev->dev,
  790. "alloc failed\n");
  791. return NULL;
  792. }
  793. }
  794. prefetch(new->hw);
  795. return new;
  796. }
  797. static struct ioat_desc_sw *
  798. ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
  799. {
  800. struct ioat_desc_sw *new;
  801. /*
  802. * used.prev points to where to start processing
  803. * used.next points to next free descriptor
  804. * if used.prev == NULL, there are none waiting to be processed
  805. * if used.next == used.prev.prev, there is only one free descriptor,
  806. * and we need to use it as a noop descriptor before
  807. * linking in a new set of descriptors, since the device
  808. * has probably already read the pointer to it
  809. */
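/*
 * Rough sketch of that state (hypothetical 4-entry ring),
 * P = used_desc.prev, N = used_desc.next:
 *
 *	[d0] -> [d1] -> [d2] -> [d3] -> back to [d0]
 *	 ^P                      ^N
 *
 * d0..d2 are owned by the hardware and d3 is the last free slot, so
 * N == P->prev and the branch below first turns d3 into a noop
 * descriptor, then tries to grow the ring.
 */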
  810. if (ioat_chan->used_desc.prev &&
  811. ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
  812. struct ioat_desc_sw *desc;
  813. struct ioat_desc_sw *noop_desc;
  814. int i;
  815. /* set up the noop descriptor */
  816. noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
  817. /* set size to non-zero value (channel returns error when size is 0) */
  818. noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
  819. noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
  820. noop_desc->hw->src_addr = 0;
  821. noop_desc->hw->dst_addr = 0;
  822. ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
  823. ioat_chan->pending++;
  824. ioat_chan->dmacount++;
  825. /* try to get a few more descriptors */
  826. for (i = 16; i; i--) {
  827. desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
  828. if (!desc) {
  829. dev_err(&ioat_chan->device->pdev->dev,
  830. "alloc failed\n");
  831. break;
  832. }
  833. list_add_tail(&desc->node, ioat_chan->used_desc.next);
  834. desc->hw->next
  835. = to_ioat_desc(desc->node.next)->async_tx.phys;
  836. to_ioat_desc(desc->node.prev)->hw->next
  837. = desc->async_tx.phys;
  838. ioat_chan->desccount++;
  839. }
  840. ioat_chan->used_desc.next = noop_desc->node.next;
  841. }
  842. new = to_ioat_desc(ioat_chan->used_desc.next);
  843. prefetch(new);
  844. ioat_chan->used_desc.next = new->node.next;
  845. if (ioat_chan->used_desc.prev == NULL)
  846. ioat_chan->used_desc.prev = &new->node;
  847. prefetch(new->hw);
  848. return new;
  849. }
  850. static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
  851. struct ioat_dma_chan *ioat_chan)
  852. {
  853. if (!ioat_chan)
  854. return NULL;
  855. switch (ioat_chan->device->version) {
  856. case IOAT_VER_1_2:
  857. return ioat1_dma_get_next_descriptor(ioat_chan);
  858. case IOAT_VER_2_0:
  859. case IOAT_VER_3_0:
  860. return ioat2_dma_get_next_descriptor(ioat_chan);
  861. }
  862. return NULL;
  863. }
  864. static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
  865. struct dma_chan *chan,
  866. dma_addr_t dma_dest,
  867. dma_addr_t dma_src,
  868. size_t len,
  869. unsigned long flags)
  870. {
  871. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  872. struct ioat_desc_sw *new;
  873. spin_lock_bh(&ioat_chan->desc_lock);
  874. new = ioat_dma_get_next_descriptor(ioat_chan);
  875. spin_unlock_bh(&ioat_chan->desc_lock);
  876. if (new) {
  877. new->len = len;
  878. new->dst = dma_dest;
  879. new->src = dma_src;
  880. new->async_tx.flags = flags;
  881. return &new->async_tx;
  882. } else {
  883. dev_err(&ioat_chan->device->pdev->dev,
  884. "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
  885. chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
  886. return NULL;
  887. }
  888. }
  889. static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
  890. struct dma_chan *chan,
  891. dma_addr_t dma_dest,
  892. dma_addr_t dma_src,
  893. size_t len,
  894. unsigned long flags)
  895. {
  896. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  897. struct ioat_desc_sw *new;
  898. spin_lock_bh(&ioat_chan->desc_lock);
  899. new = ioat2_dma_get_next_descriptor(ioat_chan);
  900. /*
  901. * leave ioat_chan->desc_lock set in ioat 2 path
  902. * it will get unlocked at end of tx_submit
  903. */
  904. if (new) {
  905. new->len = len;
  906. new->dst = dma_dest;
  907. new->src = dma_src;
  908. new->async_tx.flags = flags;
  909. return &new->async_tx;
  910. } else {
  911. spin_unlock_bh(&ioat_chan->desc_lock);
  912. dev_err(&ioat_chan->device->pdev->dev,
  913. "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
  914. chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
  915. return NULL;
  916. }
  917. }
  918. static void ioat_dma_cleanup_tasklet(unsigned long data)
  919. {
  920. struct ioat_dma_chan *chan = (void *)data;
  921. ioat_dma_memcpy_cleanup(chan);
  922. writew(IOAT_CHANCTRL_INT_DISABLE,
  923. chan->reg_base + IOAT_CHANCTRL_OFFSET);
  924. }
  925. static void
  926. ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
  927. {
  928. /*
  929. * yes we are unmapping both _page and _single
  930. * alloc'd regions with unmap_page. Is this
  931. * *really* that bad?
  932. */
  933. if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
  934. pci_unmap_page(ioat_chan->device->pdev,
  935. pci_unmap_addr(desc, dst),
  936. pci_unmap_len(desc, len),
  937. PCI_DMA_FROMDEVICE);
  938. if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
  939. pci_unmap_page(ioat_chan->device->pdev,
  940. pci_unmap_addr(desc, src),
  941. pci_unmap_len(desc, len),
  942. PCI_DMA_TODEVICE);
  943. }
  944. /**
  945. * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
  946. * @chan: ioat channel to be cleaned up
  947. */
  948. static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
  949. {
  950. unsigned long phys_complete;
  951. struct ioat_desc_sw *desc, *_desc;
  952. dma_cookie_t cookie = 0;
  953. unsigned long desc_phys;
  954. struct ioat_desc_sw *latest_desc;
  955. prefetch(ioat_chan->completion_virt);
  956. if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
  957. return;
  958. /* The completion writeback can happen at any time,
  959. so reads by the driver need to be atomic operations.
  960. The descriptor physical addresses are limited to 32-bits
  961. when the CPU can only do a 32-bit mov */
  962. #if (BITS_PER_LONG == 64)
  963. phys_complete =
  964. ioat_chan->completion_virt->full
  965. & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
  966. #else
  967. phys_complete =
  968. ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
  969. #endif
  970. if ((ioat_chan->completion_virt->full
  971. & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
  972. IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
  973. dev_err(&ioat_chan->device->pdev->dev,
  974. "Channel halted, chanerr = %x\n",
  975. readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
  976. /* TODO do something to salvage the situation */
  977. }
  978. if (phys_complete == ioat_chan->last_completion) {
  979. spin_unlock_bh(&ioat_chan->cleanup_lock);
  980. /*
  981. * perhaps we're stuck so hard that the watchdog can't go off?
  982. * try to catch it after 2 seconds
  983. */
  984. if (ioat_chan->device->version != IOAT_VER_3_0) {
  985. if (time_after(jiffies,
  986. ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
  987. ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
  988. ioat_chan->last_completion_time = jiffies;
  989. }
  990. }
  991. return;
  992. }
  993. ioat_chan->last_completion_time = jiffies;
  994. cookie = 0;
  995. if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
  996. spin_unlock_bh(&ioat_chan->cleanup_lock);
  997. return;
  998. }
  999. switch (ioat_chan->device->version) {
  1000. case IOAT_VER_1_2:
  1001. list_for_each_entry_safe(desc, _desc,
  1002. &ioat_chan->used_desc, node) {
  1003. /*
  1004. * Incoming DMA requests may use multiple descriptors,
  1005. * due to exceeding xfercap, perhaps. If so, only the
  1006. * last one will have a cookie, and require unmapping.
  1007. */
  1008. if (desc->async_tx.cookie) {
  1009. cookie = desc->async_tx.cookie;
  1010. ioat_dma_unmap(ioat_chan, desc);
  1011. if (desc->async_tx.callback) {
  1012. desc->async_tx.callback(desc->async_tx.callback_param);
  1013. desc->async_tx.callback = NULL;
  1014. }
  1015. }
  1016. if (desc->async_tx.phys != phys_complete) {
  1017. /*
  1018. * a completed entry, but not the last, so clean
  1019. * up if the client is done with the descriptor
  1020. */
  1021. if (async_tx_test_ack(&desc->async_tx)) {
  1022. list_move_tail(&desc->node,
  1023. &ioat_chan->free_desc);
  1024. } else
  1025. desc->async_tx.cookie = 0;
  1026. } else {
  1027. /*
  1028. * last used desc. Do not remove, so we can
  1029. * append from it, but don't look at it next
  1030. * time, either
  1031. */
  1032. desc->async_tx.cookie = 0;
  1033. /* TODO check status bits? */
  1034. break;
  1035. }
  1036. }
  1037. break;
  1038. case IOAT_VER_2_0:
  1039. case IOAT_VER_3_0:
  1040. /* has some other thread already cleaned up? */
  1041. if (ioat_chan->used_desc.prev == NULL)
  1042. break;
  1043. /* work backwards to find latest finished desc */
  1044. desc = to_ioat_desc(ioat_chan->used_desc.next);
  1045. latest_desc = NULL;
  1046. do {
  1047. desc = to_ioat_desc(desc->node.prev);
  1048. desc_phys = (unsigned long)desc->async_tx.phys
  1049. & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
  1050. if (desc_phys == phys_complete) {
  1051. latest_desc = desc;
  1052. break;
  1053. }
  1054. } while (&desc->node != ioat_chan->used_desc.prev);
  1055. if (latest_desc != NULL) {
  1056. /* work forwards to clear finished descriptors */
  1057. for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
  1058. &desc->node != latest_desc->node.next &&
  1059. &desc->node != ioat_chan->used_desc.next;
  1060. desc = to_ioat_desc(desc->node.next)) {
  1061. if (desc->async_tx.cookie) {
  1062. cookie = desc->async_tx.cookie;
  1063. desc->async_tx.cookie = 0;
  1064. ioat_dma_unmap(ioat_chan, desc);
  1065. if (desc->async_tx.callback) {
  1066. desc->async_tx.callback(desc->async_tx.callback_param);
  1067. desc->async_tx.callback = NULL;
  1068. }
  1069. }
  1070. }
  1071. /* move used.prev up beyond those that are finished */
  1072. if (&desc->node == ioat_chan->used_desc.next)
  1073. ioat_chan->used_desc.prev = NULL;
  1074. else
  1075. ioat_chan->used_desc.prev = &desc->node;
  1076. }
  1077. break;
  1078. }
  1079. spin_unlock_bh(&ioat_chan->desc_lock);
  1080. ioat_chan->last_completion = phys_complete;
  1081. if (cookie != 0)
  1082. ioat_chan->completed_cookie = cookie;
  1083. spin_unlock_bh(&ioat_chan->cleanup_lock);
  1084. }
  1085. /**
  1086. * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
  1087. * @chan: IOAT DMA channel handle
  1088. * @cookie: DMA transaction identifier
  1089. * @done: if not %NULL, updated with last completed transaction
  1090. * @used: if not %NULL, updated with last used transaction
  1091. */
  1092. static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
  1093. dma_cookie_t cookie,
  1094. dma_cookie_t *done,
  1095. dma_cookie_t *used)
  1096. {
  1097. struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
  1098. dma_cookie_t last_used;
  1099. dma_cookie_t last_complete;
  1100. enum dma_status ret;
  1101. last_used = chan->cookie;
  1102. last_complete = ioat_chan->completed_cookie;
  1103. ioat_chan->watchdog_tcp_cookie = cookie;
  1104. if (done)
  1105. *done = last_complete;
  1106. if (used)
  1107. *used = last_used;
  1108. ret = dma_async_is_complete(cookie, last_complete, last_used);
  1109. if (ret == DMA_SUCCESS)
  1110. return ret;
  1111. ioat_dma_memcpy_cleanup(ioat_chan);
  1112. last_used = chan->cookie;
  1113. last_complete = ioat_chan->completed_cookie;
  1114. if (done)
  1115. *done = last_complete;
  1116. if (used)
  1117. *used = last_used;
  1118. return dma_async_is_complete(cookie, last_complete, last_used);
  1119. }
  1120. static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  1121. {
  1122. struct ioat_desc_sw *desc;
  1123. spin_lock_bh(&ioat_chan->desc_lock);
  1124. desc = ioat_dma_get_next_descriptor(ioat_chan);
  1125. if (!desc) {
  1126. dev_err(&ioat_chan->device->pdev->dev,
  1127. "Unable to start null desc - get next desc failed\n");
  1128. spin_unlock_bh(&ioat_chan->desc_lock);
  1129. return;
  1130. }
  1131. desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
  1132. | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
  1133. | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
  1134. /* set size to non-zero value (channel returns error when size is 0) */
  1135. desc->hw->size = NULL_DESC_BUFFER_SIZE;
  1136. desc->hw->src_addr = 0;
  1137. desc->hw->dst_addr = 0;
  1138. async_tx_ack(&desc->async_tx);
  1139. switch (ioat_chan->device->version) {
  1140. case IOAT_VER_1_2:
  1141. desc->hw->next = 0;
  1142. list_add_tail(&desc->node, &ioat_chan->used_desc);
  1143. writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
  1144. ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
  1145. writel(((u64) desc->async_tx.phys) >> 32,
  1146. ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
  1147. writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
  1148. + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
  1149. break;
  1150. case IOAT_VER_2_0:
  1151. case IOAT_VER_3_0:
  1152. writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
  1153. ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
  1154. writel(((u64) desc->async_tx.phys) >> 32,
  1155. ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
  1156. ioat_chan->dmacount++;
  1157. __ioat2_dma_memcpy_issue_pending(ioat_chan);
  1158. break;
  1159. }
  1160. spin_unlock_bh(&ioat_chan->desc_lock);
  1161. }
  1162. /*
  1163. * Perform an IOAT transaction to verify the HW works.
  1164. */
  1165. #define IOAT_TEST_SIZE 2000
  1166. static void ioat_dma_test_callback(void *dma_async_param)
  1167. {
  1168. struct completion *cmp = dma_async_param;
  1169. complete(cmp);
  1170. }
  1171. /**
  1172. * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
  1173. * @device: device to be tested
  1174. */
  1175. static int ioat_dma_self_test(struct ioatdma_device *device)
  1176. {
  1177. int i;
  1178. u8 *src;
  1179. u8 *dest;
  1180. struct dma_chan *dma_chan;
  1181. struct dma_async_tx_descriptor *tx;
  1182. dma_addr_t dma_dest, dma_src;
  1183. dma_cookie_t cookie;
  1184. int err = 0;
  1185. struct completion cmp;
  1186. unsigned long tmo;
  1187. src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
  1188. if (!src)
  1189. return -ENOMEM;
  1190. dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
  1191. if (!dest) {
  1192. kfree(src);
  1193. return -ENOMEM;
  1194. }
  1195. /* Fill in src buffer */
  1196. for (i = 0; i < IOAT_TEST_SIZE; i++)
  1197. src[i] = (u8)i;
  1198. /* Start copy, using first DMA channel */
  1199. dma_chan = container_of(device->common.channels.next,
  1200. struct dma_chan,
  1201. device_node);
  1202. if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
  1203. dev_err(&device->pdev->dev,
  1204. "selftest cannot allocate chan resource\n");
  1205. err = -ENODEV;
  1206. goto out;
  1207. }
  1208. dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
  1209. DMA_TO_DEVICE);
  1210. dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
  1211. DMA_FROM_DEVICE);
  1212. tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
  1213. IOAT_TEST_SIZE, 0);
  1214. if (!tx) {
  1215. dev_err(&device->pdev->dev,
  1216. "Self-test prep failed, disabling\n");
  1217. err = -ENODEV;
  1218. goto free_resources;
  1219. }
  1220. async_tx_ack(tx);
  1221. init_completion(&cmp);
  1222. tx->callback = ioat_dma_test_callback;
  1223. tx->callback_param = &cmp;
  1224. cookie = tx->tx_submit(tx);
  1225. if (cookie < 0) {
  1226. dev_err(&device->pdev->dev,
  1227. "Self-test setup failed, disabling\n");
  1228. err = -ENODEV;
  1229. goto free_resources;
  1230. }
  1231. device->common.device_issue_pending(dma_chan);
  1232. tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
  1233. if (tmo == 0 ||
  1234. device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
  1235. != DMA_SUCCESS) {
  1236. dev_err(&device->pdev->dev,
  1237. "Self-test copy timed out, disabling\n");
  1238. err = -ENODEV;
  1239. goto free_resources;
  1240. }
  1241. if (memcmp(src, dest, IOAT_TEST_SIZE)) {
  1242. dev_err(&device->pdev->dev,
  1243. "Self-test copy failed compare, disabling\n");
  1244. err = -ENODEV;
  1245. goto free_resources;
  1246. }
  1247. free_resources:
  1248. device->common.device_free_chan_resources(dma_chan);
  1249. out:
  1250. kfree(src);
  1251. kfree(dest);
  1252. return err;
  1253. }
  1254. static char ioat_interrupt_style[32] = "msix";
  1255. module_param_string(ioat_interrupt_style, ioat_interrupt_style,
  1256. sizeof(ioat_interrupt_style), 0644);
  1257. MODULE_PARM_DESC(ioat_interrupt_style,
  1258. "set ioat interrupt style: msix (default), "
  1259. "msix-single-vector, msi, intx)");
  1260. /**
  1261. * ioat_dma_setup_interrupts - setup interrupt handler
  1262. * @device: ioat device
  1263. */
  1264. static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
  1265. {
  1266. struct ioat_dma_chan *ioat_chan;
  1267. int err, i, j, msixcnt;
  1268. u8 intrctrl = 0;
  1269. if (!strcmp(ioat_interrupt_style, "msix"))
  1270. goto msix;
  1271. if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
  1272. goto msix_single_vector;
  1273. if (!strcmp(ioat_interrupt_style, "msi"))
  1274. goto msi;
  1275. if (!strcmp(ioat_interrupt_style, "intx"))
  1276. goto intx;
  1277. dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
  1278. ioat_interrupt_style);
  1279. goto err_no_irq;
  1280. msix:
  1281. /* The number of MSI-X vectors should equal the number of channels */
  1282. msixcnt = device->common.chancnt;
  1283. for (i = 0; i < msixcnt; i++)
  1284. device->msix_entries[i].entry = i;
  1285. err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
  1286. if (err < 0)
  1287. goto msi;
  1288. if (err > 0)
  1289. goto msix_single_vector;
  1290. for (i = 0; i < msixcnt; i++) {
  1291. ioat_chan = ioat_lookup_chan_by_index(device, i);
  1292. err = request_irq(device->msix_entries[i].vector,
  1293. ioat_dma_do_interrupt_msix,
  1294. 0, "ioat-msix", ioat_chan);
  1295. if (err) {
  1296. for (j = 0; j < i; j++) {
  1297. ioat_chan =
  1298. ioat_lookup_chan_by_index(device, j);
  1299. free_irq(device->msix_entries[j].vector,
  1300. ioat_chan);
  1301. }
  1302. goto msix_single_vector;
  1303. }
  1304. }
  1305. intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
  1306. device->irq_mode = msix_multi_vector;
  1307. goto done;
  1308. msix_single_vector:
  1309. device->msix_entries[0].entry = 0;
  1310. err = pci_enable_msix(device->pdev, device->msix_entries, 1);
  1311. if (err)
  1312. goto msi;
  1313. err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
  1314. 0, "ioat-msix", device);
  1315. if (err) {
  1316. pci_disable_msix(device->pdev);
  1317. goto msi;
  1318. }
  1319. device->irq_mode = msix_single_vector;
  1320. goto done;
  1321. msi:
  1322. err = pci_enable_msi(device->pdev);
  1323. if (err)
  1324. goto intx;
  1325. err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
  1326. 0, "ioat-msi", device);
  1327. if (err) {
  1328. pci_disable_msi(device->pdev);
  1329. goto intx;
  1330. }
  1331. /*
  1332. * CB 1.2 devices need a bit set in configuration space to enable MSI
  1333. */
  1334. if (device->version == IOAT_VER_1_2) {
  1335. u32 dmactrl;
  1336. pci_read_config_dword(device->pdev,
  1337. IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
  1338. dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
  1339. pci_write_config_dword(device->pdev,
  1340. IOAT_PCI_DMACTRL_OFFSET, dmactrl);
  1341. }
  1342. device->irq_mode = msi;
  1343. goto done;
  1344. intx:
  1345. err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
  1346. IRQF_SHARED, "ioat-intx", device);
  1347. if (err)
  1348. goto err_no_irq;
  1349. device->irq_mode = intx;
  1350. done:
  1351. intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
  1352. writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
  1353. return 0;
  1354. err_no_irq:
  1355. /* Disable all interrupt generation */
  1356. writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
  1357. dev_err(&device->pdev->dev, "no usable interrupts\n");
  1358. device->irq_mode = none;
  1359. return -1;
  1360. }
  1361. /**
  1362. * ioat_dma_remove_interrupts - remove whatever interrupts were set
  1363. * @device: ioat device
  1364. */
  1365. static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
  1366. {
  1367. struct ioat_dma_chan *ioat_chan;
  1368. int i;
  1369. /* Disable all interrupt generation */
  1370. writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
  1371. switch (device->irq_mode) {
  1372. case msix_multi_vector:
  1373. for (i = 0; i < device->common.chancnt; i++) {
  1374. ioat_chan = ioat_lookup_chan_by_index(device, i);
  1375. free_irq(device->msix_entries[i].vector, ioat_chan);
  1376. }
  1377. pci_disable_msix(device->pdev);
  1378. break;
  1379. case msix_single_vector:
  1380. free_irq(device->msix_entries[0].vector, device);
  1381. pci_disable_msix(device->pdev);
  1382. break;
  1383. case msi:
  1384. free_irq(device->pdev->irq, device);
  1385. pci_disable_msi(device->pdev);
  1386. break;
  1387. case intx:
  1388. free_irq(device->pdev->irq, device);
  1389. break;
  1390. case none:
  1391. dev_warn(&device->pdev->dev,
  1392. "call to %s without interrupts setup\n", __func__);
  1393. }
  1394. device->irq_mode = none;
  1395. }
  1396. struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
  1397. void __iomem *iobase)
  1398. {
  1399. int err;
  1400. struct ioatdma_device *device;
  1401. device = kzalloc(sizeof(*device), GFP_KERNEL);
  1402. if (!device) {
  1403. err = -ENOMEM;
  1404. goto err_kzalloc;
  1405. }
  1406. device->pdev = pdev;
  1407. device->reg_base = iobase;
  1408. device->version = readb(device->reg_base + IOAT_VER_OFFSET);
  1409. /* DMA coherent memory pool for DMA descriptor allocations */
  1410. device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
  1411. sizeof(struct ioat_dma_descriptor),
  1412. 64, 0);
  1413. if (!device->dma_pool) {
  1414. err = -ENOMEM;
  1415. goto err_dma_pool;
  1416. }
  1417. device->completion_pool = pci_pool_create("completion_pool", pdev,
  1418. sizeof(u64), SMP_CACHE_BYTES,
  1419. SMP_CACHE_BYTES);
  1420. if (!device->completion_pool) {
  1421. err = -ENOMEM;
  1422. goto err_completion_pool;
  1423. }
  1424. INIT_LIST_HEAD(&device->common.channels);
  1425. ioat_dma_enumerate_channels(device);
  1426. device->common.device_alloc_chan_resources =
  1427. ioat_dma_alloc_chan_resources;
  1428. device->common.device_free_chan_resources =
  1429. ioat_dma_free_chan_resources;
  1430. device->common.dev = &pdev->dev;
  1431. dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
  1432. device->common.device_is_tx_complete = ioat_dma_is_complete;
  1433. switch (device->version) {
  1434. case IOAT_VER_1_2:
  1435. device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
  1436. device->common.device_issue_pending =
  1437. ioat1_dma_memcpy_issue_pending;
  1438. break;
  1439. case IOAT_VER_2_0:
  1440. case IOAT_VER_3_0:
  1441. device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
  1442. device->common.device_issue_pending =
  1443. ioat2_dma_memcpy_issue_pending;
  1444. break;
  1445. }
  1446. dev_err(&device->pdev->dev,
  1447. "Intel(R) I/OAT DMA Engine found,"
  1448. " %d channels, device version 0x%02x, driver version %s\n",
  1449. device->common.chancnt, device->version, IOAT_DMA_VERSION);
  1450. if (!device->common.chancnt) {
  1451. dev_err(&device->pdev->dev,
  1452. "Intel(R) I/OAT DMA Engine problem found: "
  1453. "zero channels detected\n");
  1454. goto err_setup_interrupts;
  1455. }
  1456. err = ioat_dma_setup_interrupts(device);
  1457. if (err)
  1458. goto err_setup_interrupts;
  1459. err = ioat_dma_self_test(device);
  1460. if (err)
  1461. goto err_self_test;
  1462. ioat_set_tcp_copy_break(device);
  1463. dma_async_device_register(&device->common);
  1464. if (device->version != IOAT_VER_3_0) {
  1465. INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
  1466. schedule_delayed_work(&device->work,
  1467. WATCHDOG_DELAY);
  1468. }
  1469. return device;
  1470. err_self_test:
  1471. ioat_dma_remove_interrupts(device);
  1472. err_setup_interrupts:
  1473. pci_pool_destroy(device->completion_pool);
  1474. err_completion_pool:
  1475. pci_pool_destroy(device->dma_pool);
  1476. err_dma_pool:
  1477. kfree(device);
  1478. err_kzalloc:
  1479. dev_err(&pdev->dev,
  1480. "Intel(R) I/OAT DMA Engine initialization failed\n");
  1481. return NULL;
  1482. }
  1483. void ioat_dma_remove(struct ioatdma_device *device)
  1484. {
  1485. struct dma_chan *chan, *_chan;
  1486. struct ioat_dma_chan *ioat_chan;
  1487. if (device->version != IOAT_VER_3_0)
  1488. cancel_delayed_work(&device->work);
  1489. ioat_dma_remove_interrupts(device);
  1490. dma_async_device_unregister(&device->common);
  1491. pci_pool_destroy(device->dma_pool);
  1492. pci_pool_destroy(device->completion_pool);
  1493. iounmap(device->reg_base);
  1494. pci_release_regions(device->pdev);
  1495. pci_disable_device(device->pdev);
  1496. list_for_each_entry_safe(chan, _chan,
  1497. &device->common.channels, device_node) {
  1498. ioat_chan = to_ioat_chan(chan);
  1499. list_del(&chan->device_node);
  1500. kfree(ioat_chan);
  1501. }
  1502. kfree(device);
  1503. }