dma_v3.c

/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
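
/*
 * Editor's note -- a worked example of the encodings above: a 5-source
 * XOR is programmed into the descriptor as src_cnt_to_hw(5) == 3, and a
 * descriptor read back with src_cnt == 3 decodes as src_cnt_to_sw(3) == 5
 * sources. The 16-source pq variants use a bias of 9 instead of 2, and
 * destination counts are biased by 1.
 */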

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
                                       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
                                        0, 1, 2, 3, 4, 5, 6 };

/*
 * technically sources 1 and 2 do not require SED, but the op will have
 * at least 9 descriptors so that's irrelevant.
 */
static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      1, 1, 1, 1, 1, 1, 1 };
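
/*
 * Editor's note on reading these tables: for the two-descriptor ops, bit
 * 'idx' of xor_idx_to_desc/pq_idx_to_desc selects descs[0] (base) or
 * descs[1] (extended), and *_idx_to_field[idx] names the u64 slot inside
 * that descriptor. pq_idx_to_desc == 0xf8 therefore places pq sources 0-2
 * in the base descriptor and sources 3-7 in the extension; the 16-source
 * tables are plain arrays because three descriptor blocks are in play.
 */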

static void ioat3_eh(struct ioat2_dma_chan *ioat);

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
                        dma_addr_t addr, u32 offset, int idx)
{
        struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

        raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
        struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

        return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
        struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

        return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
                       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
        struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
        struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

        raw->field[pq_idx_to_field[idx]] = addr + offset;
        pq->coef[idx] = coef;
}

static int sed_get_pq16_pool_idx(int src_cnt)
{
        return pq16_idx_to_sed[src_cnt];
}

static bool is_jf_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
                return true;
        default:
                return false;
        }
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
                return true;
        default:
                return false;
        }
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
                return true;
        default:
                return false;
        }
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
                return true;
        default:
                return false;
        }
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
        return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
                is_hsw_ioat(pdev);
}

static bool is_bwd_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
                return true;
        default:
                return false;
        }
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
                return true;
        default:
                return false;
        }
}

static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
                         dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
        struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
        struct ioat_pq16a_descriptor *pq16 =
                (struct ioat_pq16a_descriptor *)desc[1];
        struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

        raw->field[pq16_idx_to_field[idx]] = addr + offset;

        if (idx < 8)
                pq->coef[idx] = coef;
        else
                pq16->coef[idx - 8] = coef;
}
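
/*
 * Editor's note: in pq16_set_src() the first eight galois-field
 * coefficients live in the base pq descriptor and the remaining eight in
 * the pq16a block, which is why the coefficient store is split on
 * idx < 8 while the source address always goes through the
 * pq16_idx_to_desc/pq16_idx_to_field lookup.
 */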

static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
        struct ioat_sed_ent *sed;
        gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

        sed = kmem_cache_alloc(device->sed_pool, flags);
        if (!sed)
                return NULL;

        sed->hw_pool = hw_pool;
        sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
                                 flags, &sed->dma);
        if (!sed->hw) {
                kmem_cache_free(device->sed_pool, sed);
                return NULL;
        }

        return sed;
}
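
/*
 * Editor's note: the GFP_ATOMIC | __GFP_ZERO allocation above is
 * presumably required because ioat3_alloc_sed() is called from the
 * descriptor prep path with the ring's prep_lock held, where sleeping
 * allocations are not allowed.
 */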

static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
        if (!sed)
                return;

        dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
        kmem_cache_free(device->sed_pool, sed);
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        if (hw->ctl_f.op == IOAT_OP_XOR ||
            hw->ctl_f.op == IOAT_OP_XOR_VAL) {
                struct ioat_xor_descriptor *xor = desc->xor;

                if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
                        return true;
        } else if (hw->ctl_f.op == IOAT_OP_PQ ||
                   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
                struct ioat_pq_descriptor *pq = desc->pq;

                if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
                        return true;
        }

        return false;
}
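
/*
 * Editor's note: the thresholds above mirror the lookup tables at the top
 * of the file -- a base xor descriptor holds five source addresses and a
 * base pq descriptor three, so any operation with more sources must have
 * consumed a second (extended) ring slot that cleanup has to skip.
 */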

static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
        u64 phys_complete;
        u64 completion;

        completion = *chan->completion;
        phys_complete = ioat_chansts_to_addr(completion);

        dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
                (unsigned long long) phys_complete);

        return phys_complete;
}

static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
                                   u64 *phys_complete)
{
        *phys_complete = ioat3_get_current_completion(chan);
        if (*phys_complete == chan->last_completion)
                return false;

        clear_bit(IOAT_COMPLETION_ACK, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        return true;
}

static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        switch (hw->ctl_f.op) {
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
        {
                struct ioat_pq_descriptor *pq = desc->pq;

                /* check if there's error written */
                if (!pq->dwbes_f.wbes)
                        return;

                /* need to set a chanerr var for checking to clear later */
                if (pq->dwbes_f.p_val_err)
                        *desc->result |= SUM_CHECK_P_RESULT;

                if (pq->dwbes_f.q_val_err)
                        *desc->result |= SUM_CHECK_Q_RESULT;

                return;
        }
        default:
                return;
        }
}
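
/*
 * Editor's note: with the IOAT_CAP_DWBES capability the hardware writes
 * validate results back into the descriptor itself (the dwbes_f fields),
 * so P/Q check failures can be harvested here during normal cleanup
 * instead of halting the channel and taking the ioat3_eh() path.
 */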

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        bool seen_current = false;
        int idx = ioat->tail, i;
        u16 active;

        dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);

        /*
         * At restart of the channel, the completion address and the
         * channel status will be 0 due to starting a new chain. Since
         * it's new chain and the first descriptor "fails", there is
         * nothing to clean up. We do not want to reap the entire submitted
         * chain due to this 0 address value and then BUG.
         */
        if (!phys_complete)
                return;

        active = ioat2_ring_active(ioat);
        for (i = 0; i < active && !seen_current; i++) {
                struct dma_async_tx_descriptor *tx;

                smp_read_barrier_depends();
                prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
                desc = ioat2_get_ring_ent(ioat, idx + i);
                dump_desc_dbg(ioat, desc);

                /* set err stat if we are using dwbes */
                if (device->cap & IOAT_CAP_DWBES)
                        desc_get_errstat(ioat, desc);

                tx = &desc->txd;
                if (tx->cookie) {
                        dma_cookie_complete(tx);
                        dma_descriptor_unmap(tx);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }

                if (tx->phys == phys_complete)
                        seen_current = true;

                /* skip extended descriptors */
                if (desc_has_ext(desc)) {
                        BUG_ON(i + 1 >= active);
                        i++;
                }

                /* cleanup super extended descriptors */
                if (desc->sed) {
                        ioat3_free_sed(device, desc->sed);
                        desc->sed = NULL;
                }
        }
        smp_mb(); /* finish all descriptor reads before incrementing tail */
        ioat->tail = idx + i;
        BUG_ON(active && !seen_current); /* no active descs have written a completion? */
        chan->last_completion = phys_complete;

        if (active - i == 0) {
                dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
                        __func__);
                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
        /* 5 microsecond delay per pending descriptor */
        writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
               chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}

static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        u64 phys_complete;

        spin_lock_bh(&chan->cleanup_lock);

        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        if (is_ioat_halted(*chan->completion)) {
                u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

                if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
                        mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
                        ioat3_eh(ioat);
                }
        }

        spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

        ioat3_cleanup(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        u64 phys_complete;

        ioat2_quiesce(chan, 0);
        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        __ioat2_restart_chan(ioat);
}

static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct pci_dev *pdev = to_pdev(chan);
        struct ioat_dma_descriptor *hw;
        u64 phys_complete;
        struct ioat_ring_ent *desc;
        u32 err_handled = 0;
        u32 chanerr_int;
        u32 chanerr;

        /* cleanup so tail points to descriptor that caused the error */
        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

        dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
                __func__, chanerr, chanerr_int);

        desc = ioat2_get_ring_ent(ioat, ioat->tail);
        hw = desc->hw;
        dump_desc_dbg(ioat, desc);

        switch (hw->ctl_f.op) {
        case IOAT_OP_XOR_VAL:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
                }
                break;
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
                }
                if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
                        *desc->result |= SUM_CHECK_Q_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_Q_ERR;
                }
                break;
        }

        /* fault on unhandled error or spurious halt */
        if (chanerr ^ err_handled || chanerr == 0) {
                dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
                        __func__, chanerr, err_handled);
                BUG();
        }

        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

        /* mark faulting descriptor as complete */
        *chan->completion = desc->txd.phys;

        spin_lock_bh(&ioat->prep_lock);
        ioat3_restart_channel(ioat);
        spin_unlock_bh(&ioat->prep_lock);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        if (ioat2_ring_active(ioat)) {
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                return;
        }

        if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        else if (ioat->alloc_order > ioat_get_alloc_order()) {
                /* if the ring is idle, empty, and oversized try to step
                 * down the size
                 */
                reshape_ring(ioat, ioat->alloc_order - 1);

                /* keep shrinking until we get back to our minimum
                 * default size
                 */
                if (ioat->alloc_order > ioat_get_alloc_order())
                        mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
}

static void ioat3_timer_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;
        u64 phys_complete;
        u64 status;

        status = ioat_chansts(chan);

        /* when halted due to errors check for channel
         * programming errors before advancing the completion state
         */
        if (is_ioat_halted(status)) {
                u32 chanerr;

                chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
                        __func__, chanerr);
                if (test_bit(IOAT_RUN, &chan->state))
                        BUG_ON(is_ioat_bug(chanerr));
                else /* we never got off the ground */
                        return;
        }

        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
        spin_lock_bh(&chan->cleanup_lock);
        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);
        else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
                spin_lock_bh(&ioat->prep_lock);
                ioat3_restart_channel(ioat);
                spin_unlock_bh(&ioat->prep_lock);
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        } else {
                set_bit(IOAT_COMPLETION_ACK, &chan->state);
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        }

        if (ioat2_ring_active(ioat))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        else {
                spin_lock_bh(&ioat->prep_lock);
                check_active(ioat);
                spin_unlock_bh(&ioat->prep_lock);
        }
        spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        enum dma_status ret;

        ret = dma_cookie_status(c, cookie, txstate);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat3_cleanup(ioat);

        return dma_cookie_status(c, cookie, txstate);
}
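
/*
 * Editor's note: dma_cookie_status() is intentionally called twice above --
 * if the cookie has not completed yet, ioat3_cleanup() reaps any finished
 * descriptors (advancing the completed cookie) before the status is
 * re-checked, so a poll can observe a completion without waiting for the
 * next interrupt or timer tick.
 */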

static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
                      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
                      size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_ring_ent *compl_desc;
        struct ioat_ring_ent *desc;
        struct ioat_ring_ent *ext;
        size_t total_len = len;
        struct ioat_xor_descriptor *xor;
        struct ioat_xor_ext_descriptor *xor_ex = NULL;
        struct ioat_dma_descriptor *hw;
        int num_descs, with_ext, idx, i;
        u32 offset = 0;
        u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

        BUG_ON(src_cnt < 2);

        num_descs = ioat2_xferlen_to_descs(ioat, len);
        /* we need 2x the number of descriptors to cover greater than 5
         * sources
         */
        if (src_cnt > 5) {
                with_ext = 1;
                num_descs *= 2;
        } else
                with_ext = 0;

        /* completion writes from the raid engine may pass completion
         * writes from the legacy engine, so we need one extra null
         * (legacy) descriptor to ensure all completion writes arrive in
         * order.
         */
        if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
                idx = ioat->head;
        else
                return NULL;

        i = 0;
        do {
                struct ioat_raw_descriptor *descs[2];
                size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
                int s;

                desc = ioat2_get_ring_ent(ioat, idx + i);
                xor = desc->xor;

                /* save a branch by unconditionally retrieving the
                 * extended descriptor xor_set_src() knows to not write
                 * to it in the single descriptor case
                 */
                ext = ioat2_get_ring_ent(ioat, idx + i + 1);
                xor_ex = ext->xor_ex;

                descs[0] = (struct ioat_raw_descriptor *) xor;
                descs[1] = (struct ioat_raw_descriptor *) xor_ex;
                for (s = 0; s < src_cnt; s++)
                        xor_set_src(descs, src[s], offset, s);
                xor->size = xfer_size;
                xor->dst_addr = dest + offset;
                xor->ctl = 0;
                xor->ctl_f.op = op;
                xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

                len -= xfer_size;
                offset += xfer_size;
                dump_desc_dbg(ioat, desc);
        } while ((i += 1 + with_ext) < num_descs);

        /* last xor descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

        /* completion descriptor carries interrupt bit */
        compl_desc = ioat2_get_ring_ent(ioat, idx + i);
        compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
        hw = compl_desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.compl_write = 1;
        hw->size = NULL_DESC_BUFFER_SIZE;
        dump_desc_dbg(ioat, compl_desc);

        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
}
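
/*
 * Editor's note -- ring accounting example for the routine above: a
 * 7-source xor sets with_ext = 1, so every transfer-sized chunk occupies
 * two ring slots (base plus extended descriptor), and one trailing null
 * descriptor is reserved for the ordered completion write; hence the
 * ioat2_check_space_lock(ioat, num_descs + 1) reservation.
 */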

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
               unsigned int src_cnt, size_t len, unsigned long flags)
{
        return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
                   unsigned int src_cnt, size_t len,
                   enum sum_check_flags *result, unsigned long flags)
{
        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *result = 0;

        return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
                                     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
        struct device *dev = to_dev(&ioat->base);
        struct ioat_pq_descriptor *pq = desc->pq;
        struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
        struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
        int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
        int i;

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
                " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
                " src_cnt: %d)\n",
                desc_id(desc), (unsigned long long) desc->txd.phys,
                (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
                desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
                pq->ctl_f.compl_write,
                pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
                pq->ctl_f.src_cnt);
        for (i = 0; i < src_cnt; i++)
                dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
                        (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
        dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
        dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
        dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
                               struct ioat_ring_ent *desc)
{
        struct device *dev = to_dev(&ioat->base);
        struct ioat_pq_descriptor *pq = desc->pq;
        struct ioat_raw_descriptor *descs[] = { (void *)pq,
                                                (void *)pq,
                                                (void *)pq };
        int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
        int i;

        if (desc->sed) {
                descs[1] = (void *)desc->sed->hw;
                descs[2] = (void *)desc->sed->hw + 64;
        }

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
                " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
                " src_cnt: %d)\n",
                desc_id(desc), (unsigned long long) desc->txd.phys,
                (unsigned long long) pq->next,
                desc->txd.flags, pq->size, pq->ctl,
                pq->ctl_f.op, pq->ctl_f.int_en,
                pq->ctl_f.compl_write,
                pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
                pq->ctl_f.src_cnt);
        for (i = 0; i < src_cnt; i++) {
                dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
                        (unsigned long long) pq16_get_src(descs, i),
                        pq->coef[i]);
        }
        dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
        dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
                     const dma_addr_t *dst, const dma_addr_t *src,
                     unsigned int src_cnt, const unsigned char *scf,
                     size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *compl_desc;
        struct ioat_ring_ent *desc;
        struct ioat_ring_ent *ext;
        size_t total_len = len;
        struct ioat_pq_descriptor *pq;
        struct ioat_pq_ext_descriptor *pq_ex = NULL;
        struct ioat_dma_descriptor *hw;
        u32 offset = 0;
        u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
        int i, s, idx, with_ext, num_descs;
        int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

        dev_dbg(to_dev(chan), "%s\n", __func__);
        /* the engine requires at least two sources (we provide
         * at least 1 implied source in the DMA_PREP_CONTINUE case)
         */
        BUG_ON(src_cnt + dmaf_continue(flags) < 2);

        num_descs = ioat2_xferlen_to_descs(ioat, len);
        /* we need 2x the number of descriptors to cover greater than 3
         * sources (we need 1 extra source in the q-only continuation
         * case and 3 extra sources in the p+q continuation case.
         */
        if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
            (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
                with_ext = 1;
                num_descs *= 2;
        } else
                with_ext = 0;

        /* completion writes from the raid engine may pass completion
         * writes from the legacy engine, so we need one extra null
         * (legacy) descriptor to ensure all completion writes arrive in
         * order.
         */
        if (likely(num_descs) &&
            ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
                idx = ioat->head;
        else
                return NULL;

        i = 0;
        do {
                struct ioat_raw_descriptor *descs[2];
                size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);
                pq = desc->pq;

                /* save a branch by unconditionally retrieving the
                 * extended descriptor pq_set_src() knows to not write
                 * to it in the single descriptor case
                 */
                ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
                pq_ex = ext->pq_ex;

                descs[0] = (struct ioat_raw_descriptor *) pq;
                descs[1] = (struct ioat_raw_descriptor *) pq_ex;

                for (s = 0; s < src_cnt; s++)
                        pq_set_src(descs, src[s], offset, scf[s], s);

                /* see the comment for dma_maxpq in include/linux/dmaengine.h */
                if (dmaf_p_disabled_continue(flags))
                        pq_set_src(descs, dst[1], offset, 1, s++);
                else if (dmaf_continue(flags)) {
                        pq_set_src(descs, dst[0], offset, 0, s++);
                        pq_set_src(descs, dst[1], offset, 1, s++);
                        pq_set_src(descs, dst[1], offset, 0, s++);
                }
                pq->size = xfer_size;
                pq->p_addr = dst[0] + offset;
                pq->q_addr = dst[1] + offset;
                pq->ctl = 0;
                pq->ctl_f.op = op;
                /* we turn on descriptor write back error status */
                if (device->cap & IOAT_CAP_DWBES)
                        pq->ctl_f.wb_en = result ? 1 : 0;
                pq->ctl_f.src_cnt = src_cnt_to_hw(s);
                pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
                pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

                len -= xfer_size;
                offset += xfer_size;
        } while ((i += 1 + with_ext) < num_descs);

        /* last pq descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        dump_pq_desc_dbg(ioat, desc, ext);

        if (!cb32) {
                pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
                pq->ctl_f.compl_write = 1;
                compl_desc = desc;
        } else {
                /* completion descriptor carries interrupt bit */
                compl_desc = ioat2_get_ring_ent(ioat, idx + i);
                compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
                hw = compl_desc->hw;
                hw->ctl = 0;
                hw->ctl_f.null = 1;
                hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
                hw->ctl_f.compl_write = 1;
                hw->size = NULL_DESC_BUFFER_SIZE;
                dump_desc_dbg(ioat, compl_desc);
        }

        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
                       const dma_addr_t *dst, const dma_addr_t *src,
                       unsigned int src_cnt, const unsigned char *scf,
                       size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        size_t total_len = len;
        struct ioat_pq_descriptor *pq;
        u32 offset = 0;
        u8 op;
        int i, s, idx, num_descs;

        /* this function only handles src_cnt 9 - 16 */
        BUG_ON(src_cnt < 9);

        /* this function is only called with 9-16 sources */
        op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

        dev_dbg(to_dev(chan), "%s\n", __func__);

        num_descs = ioat2_xferlen_to_descs(ioat, len);

        /*
         * 16 source pq is only available on cb3.3 and has no completion
         * write hw bug.
         */
        if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
                idx = ioat->head;
        else
                return NULL;

        i = 0;

        do {
                struct ioat_raw_descriptor *descs[4];
                size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);
                pq = desc->pq;

                descs[0] = (struct ioat_raw_descriptor *) pq;

                desc->sed = ioat3_alloc_sed(device,
                                            sed_get_pq16_pool_idx(src_cnt));
                if (!desc->sed) {
                        dev_err(to_dev(chan),
                                "%s: no free sed entries\n", __func__);
                        return NULL;
                }

                pq->sed_addr = desc->sed->dma;
                desc->sed->parent = desc;

                descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
                descs[2] = (void *)descs[1] + 64;

                for (s = 0; s < src_cnt; s++)
                        pq16_set_src(descs, src[s], offset, scf[s], s);

                /* see the comment for dma_maxpq in include/linux/dmaengine.h */
                if (dmaf_p_disabled_continue(flags))
                        pq16_set_src(descs, dst[1], offset, 1, s++);
                else if (dmaf_continue(flags)) {
                        pq16_set_src(descs, dst[0], offset, 0, s++);
                        pq16_set_src(descs, dst[1], offset, 1, s++);
                        pq16_set_src(descs, dst[1], offset, 0, s++);
                }

                pq->size = xfer_size;
                pq->p_addr = dst[0] + offset;
                pq->q_addr = dst[1] + offset;
                pq->ctl = 0;
                pq->ctl_f.op = op;
                pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
                /* we turn on descriptor write back error status */
                if (device->cap & IOAT_CAP_DWBES)
                        pq->ctl_f.wb_en = result ? 1 : 0;
                pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
                pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

                len -= xfer_size;
                offset += xfer_size;
        } while (++i < num_descs);

        /* last pq descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

        /* with cb3.3 we should be able to do completion w/o a null desc */
        pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        pq->ctl_f.compl_write = 1;

        dump_pq16_desc_dbg(ioat, desc);

        /* we leave the channel locked to ensure in order submission */
        return &desc->txd;
}
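
/*
 * Editor's note: unlike the 8-source path, each 16-source pq descriptor
 * carries its extra source addresses in a separately allocated SED block
 * (pq->sed_addr above) rather than in a second ring slot, so only
 * num_descs ring entries are reserved and the completion write can be
 * requested on the pq descriptor itself.
 */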

static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
              unsigned int src_cnt, const unsigned char *scf, size_t len,
              unsigned long flags)
{
        struct dma_device *dma = chan->device;

        /* specify valid address for disabled result */
        if (flags & DMA_PREP_PQ_DISABLE_P)
                dst[0] = dst[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                dst[1] = dst[0];

        /* handle the single source multiply case from the raid6
         * recovery path
         */
        if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
                dma_addr_t single_source[2];
                unsigned char single_source_coef[2];

                BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
                single_source[0] = src[0];
                single_source[1] = src[0];
                single_source_coef[0] = scf[0];
                single_source_coef[1] = 0;

                return (src_cnt > 8) && (dma->max_pq > 8) ?
                        __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
                                               2, single_source_coef, len,
                                               flags) :
                        __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
                                             single_source_coef, len, flags);

        } else {
                return (src_cnt > 8) && (dma->max_pq > 8) ?
                        __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
                                               scf, len, flags) :
                        __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
                                             scf, len, flags);
        }
}

struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                  unsigned int src_cnt, const unsigned char *scf, size_t len,
                  enum sum_check_flags *pqres, unsigned long flags)
{
        struct dma_device *dma = chan->device;

        /* specify valid address for disabled result */
        if (flags & DMA_PREP_PQ_DISABLE_P)
                pq[0] = pq[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                pq[1] = pq[0];

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *pqres = 0;

        return (src_cnt > 8) && (dma->max_pq > 8) ?
                __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
                                       flags) :
                __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
                                     flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                 unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct dma_device *dma = chan->device;
        unsigned char scf[src_cnt];
        dma_addr_t pq[2];

        memset(scf, 0, src_cnt);
        pq[0] = dst;
        flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[1] = dst; /* specify valid address for disabled result */

        return (src_cnt > 8) && (dma->max_pq > 8) ?
                __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                       flags) :
                __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                     flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
                     unsigned int src_cnt, size_t len,
                     enum sum_check_flags *result, unsigned long flags)
{
        struct dma_device *dma = chan->device;
        unsigned char scf[src_cnt];
        dma_addr_t pq[2];

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *result = 0;

        memset(scf, 0, src_cnt);
        pq[0] = src[0];
        flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[1] = pq[0]; /* specify valid address for disabled result */

        return (src_cnt > 8) && (dma->max_pq > 8) ?
                __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
                                       scf, len, flags) :
                __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
                                     scf, len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat2_check_space_lock(ioat, 1) == 0)
                desc = ioat2_get_ring_ent(ioat, ioat->head);
        else
                return NULL;

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;

        desc->txd.flags = flags;
        desc->len = 1;

        dump_desc_dbg(ioat, desc);

        /* we leave the channel locked to ensure in order submission */
        return &desc->txd;
}

static void ioat3_dma_test_callback(void *dma_async_param)
{
        struct completion *cmp = dma_async_param;

        complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[IOAT_NUM_SRC_TEST];
        struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        u32 xor_val_result;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;
        u8 op = 0;

        dev_dbg(dev, "%s\n", __func__);

        if (!dma_has_cap(DMA_XOR, dma->cap_mask))
                return 0;

        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);

                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                        (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        op = IOAT_OP_XOR;

        dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                      IOAT_NUM_SRC_TEST, PAGE_SIZE,
                                      DMA_PREP_INTERRUPT);

        if (!tx) {
                dev_err(dev, "Self-test xor prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat3_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test xor setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);

                if (ptr[i] != cmp_word) {
                        dev_err(dev, "Self-test xor failed compare\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }
        dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

        /* skip validate if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
                goto free_resources;

        op = IOAT_OP_XOR_VAL;

        /* validate the sources with the destination page */
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                xor_val_srcs[i] = xor_srcs[i];
        xor_val_srcs[i] = dest;

        xor_val_result = 1;

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat3_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        if (xor_val_result != 0) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto free_resources;
        }

        memset(page_address(dest), 0, PAGE_SIZE);

        /* test for non-zero parity sum */
        op = IOAT_OP_XOR_VAL;

        xor_val_result = 0;
        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test 2nd zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat3_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test 2nd zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        if (xor_val_result != SUM_CHECK_P_RESULT) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        goto free_resources;
dma_unmap:
        if (op == IOAT_OP_XOR) {
                dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
                for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                        dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                       DMA_TO_DEVICE);
        } else if (op == IOAT_OP_XOR_VAL) {
                for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                        dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                       DMA_TO_DEVICE);
        }
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        src_idx = IOAT_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *device)
{
        int rc = ioat_dma_self_test(device);

        if (rc)
                return rc;

        rc = ioat_xor_val_self_test(device);
        if (rc)
                return rc;

        return 0;
}

static int ioat3_irq_reinit(struct ioatdma_device *device)
{
        int msixcnt = device->common.chancnt;
        struct pci_dev *pdev = device->pdev;
        int i;
        struct msix_entry *msix;
        struct ioat_chan_common *chan;
        int err = 0;

        switch (device->irq_mode) {
        case IOAT_MSIX:
                for (i = 0; i < msixcnt; i++) {
                        msix = &device->msix_entries[i];
                        chan = ioat_chan_by_index(device, i);
                        devm_free_irq(&pdev->dev, msix->vector, chan);
                }

                pci_disable_msix(pdev);
                break;

        case IOAT_MSIX_SINGLE:
                msix = &device->msix_entries[0];
                chan = ioat_chan_by_index(device, 0);
                devm_free_irq(&pdev->dev, msix->vector, chan);
                pci_disable_msix(pdev);
                break;

        case IOAT_MSI:
                chan = ioat_chan_by_index(device, 0);
                devm_free_irq(&pdev->dev, pdev->irq, chan);
                pci_disable_msi(pdev);
                break;

        case IOAT_INTX:
                chan = ioat_chan_by_index(device, 0);
                devm_free_irq(&pdev->dev, pdev->irq, chan);
                break;

        default:
                return 0;
        }

        device->irq_mode = IOAT_NOIRQ;

        err = ioat_dma_setup_interrupts(device);

        return err;
}

static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
        /* throw away whatever the channel was doing and get it
         * initialized, with ioat3 specific workarounds
         */
        struct ioatdma_device *device = chan->device;
        struct pci_dev *pdev = device->pdev;
        u32 chanerr;
        u16 dev_id;
        int err;

        ioat2_quiesce(chan, msecs_to_jiffies(100));

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

        if (device->version < IOAT_VER_3_3) {
                /* clear any pending errors */
                err = pci_read_config_dword(pdev,
                                IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
                if (err) {
                        dev_err(&pdev->dev,
                                "channel error register unreachable\n");
                        return err;
                }
                pci_write_config_dword(pdev,
                                IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

                /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
                 * (workaround for spurious config parity error after restart)
                 */
                pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
                if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
                        pci_write_config_dword(pdev,
                                               IOAT_PCI_DMAUNCERRSTS_OFFSET,
                                               0x10);
                }
        }

        err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
        if (err) {
                dev_err(&pdev->dev, "Failed to reset!\n");
                return err;
        }

        if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
                err = ioat3_irq_reinit(device);

        return err;
}

static void ioat3_intr_quirk(struct ioatdma_device *device)
{
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
        u32 errmask;

        dma = &device->common;

        /*
         * if we have descriptor write back error status, we mask the
         * error interrupts
         */
        if (device->cap & IOAT_CAP_DWBES) {
                list_for_each_entry(c, &dma->channels, device_node) {
                        chan = to_chan_common(c);
                        errmask = readl(chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                        errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
                                   IOAT_CHANERR_XOR_Q_ERR;
                        writel(errmask, chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                }
        }
}

int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
        struct pci_dev *pdev = device->pdev;
        int dca_en = system_has_dca_enabled(pdev);
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
        bool is_raid_device = false;
        int err;

        device->enumerate_channels = ioat2_enumerate_channels;
        device->reset_hw = ioat3_reset_hw;
        device->self_test = ioat3_dma_self_test;
        device->intr_quirk = ioat3_intr_quirk;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;

        dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
        dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

        device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

        if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
                device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

        /* dca is incompatible with raid operations */
        if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
                device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

        if (device->cap & IOAT_CAP_XOR) {
                is_raid_device = true;
                dma->max_xor = 8;

                dma_cap_set(DMA_XOR, dma->cap_mask);
                dma->device_prep_dma_xor = ioat3_prep_xor;

                dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
                dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
        }

        if (device->cap & IOAT_CAP_PQ) {
                is_raid_device = true;

                dma->device_prep_dma_pq = ioat3_prep_pq;
                dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
                dma_cap_set(DMA_PQ, dma->cap_mask);
                dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

                if (device->cap & IOAT_CAP_RAID16SS) {
                        dma_set_maxpq(dma, 16, 0);
                } else {
                        dma_set_maxpq(dma, 8, 0);
                }

                if (!(device->cap & IOAT_CAP_XOR)) {
                        dma->device_prep_dma_xor = ioat3_prep_pqxor;
                        dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
                        dma_cap_set(DMA_XOR, dma->cap_mask);
                        dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

                        if (device->cap & IOAT_CAP_RAID16SS) {
                                dma->max_xor = 16;
                        } else {
                                dma->max_xor = 8;
                        }
                }
        }

        dma->device_tx_status = ioat3_tx_status;
        device->cleanup_fn = ioat3_cleanup_event;
        device->timer_fn = ioat3_timer_event;

        /* starting with CB3.3 super extended descriptors are supported */
        if (device->cap & IOAT_CAP_RAID16SS) {
                char pool_name[14];
                int i;

                /* allocate sw descriptor pool for SED */
                device->sed_pool = kmem_cache_create("ioat_sed",
                                sizeof(struct ioat_sed_ent), 0, 0, NULL);
                if (!device->sed_pool)
                        return -ENOMEM;

                for (i = 0; i < MAX_SED_POOLS; i++) {
                        snprintf(pool_name, 14, "ioat_hw%d_sed", i);

                        /* allocate SED DMA pool */
                        device->sed_hw_pool[i] = dma_pool_create(pool_name,
                                        &pdev->dev,
                                        SED_SIZE * (i + 1), 64, 0);
                        if (!device->sed_hw_pool[i])
                                goto sed_pool_cleanup;
                }
        }

        err = ioat_probe(device);
        if (err)
                return err;
        ioat_set_tcp_copy_break(262144);

        list_for_each_entry(c, &dma->channels, device_node) {
                chan = to_chan_common(c);
                writel(IOAT_DMA_DCA_ANY_CPU,
                       chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(device);
        if (err)
                return err;

        ioat_kobject_add(device, &ioat2_ktype);

        if (dca)
                device->dca = ioat3_dca_init(pdev, device->reg_base);

        return 0;

sed_pool_cleanup:
        if (device->sed_pool) {
                int i;

                kmem_cache_destroy(device->sed_pool);

                for (i = 0; i < MAX_SED_POOLS; i++)
                        if (device->sed_hw_pool[i])
                                dma_pool_destroy(device->sed_hw_pool[i]);
        }

        return -ENOMEM;
}

void ioat3_dma_remove(struct ioatdma_device *device)
{
        if (device->sed_pool) {
                int i;

                kmem_cache_destroy(device->sed_pool);

                for (i = 0; i < MAX_SED_POOLS; i++)
                        if (device->sed_hw_pool[i])
                                dma_pool_destroy(device->sed_hw_pool[i]);
        }
}