dma_v3.c

/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
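
/*
 * Example (illustrative): a 6-source software xor is encoded on the wire
 * as src_cnt_to_hw(6) == 4, and decoded back by the cleanup path with
 * src_cnt_to_sw(4) == 6.
 */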

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
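
/*
 * Illustrative reading of the tables above: bit idx of *_idx_to_desc picks
 * which descriptor holds source idx (0 = base, 1 = extended), and
 * *_idx_to_field[idx] names the u64 slot within that descriptor.  For xor
 * source idx 6: (0xe0 >> 6) & 1 == 1 and xor_idx_to_field[6] == 1, so the
 * address lands in field 1 of the extended descriptor.
 */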

static void ioat3_eh(struct ioat2_dma_chan *ioat);

static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}

static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}

static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
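	/*
	 * Addresses stored in the final hardware descriptor of a
	 * multi-descriptor operation point at its last chunk; subtracting
	 * 'offset' below rebases them to the start of the whole transfer
	 * before unmapping the full 'len' bytes.
	 */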
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_PQ_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat3_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain.  Since
	 * it's a new chain and the first descriptor "fails", there is
	 * nothing to clean up.  We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat3_dma_unmap(ioat, desc, idx + i);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
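	/*
	 * Illustrative arithmetic: with 10 descriptors still pending, the
	 * interrupt is coalesced for min(5 * 10, IOAT_INTRDELAY_MASK)
	 * microseconds before firing.
	 */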
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}

static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	if (is_ioat_halted(*chan->completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
			ioat3_eh(ioat);
		}
	}

	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	u64 phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = to_pdev(chan);
	struct ioat_dma_descriptor *hw;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat2_get_ring_ent(ioat, ioat->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}
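
	/*
	 * chanerr ^ err_handled is nonzero iff some raised error bit was
	 * not absorbed by the switch above; chanerr == 0 means the channel
	 * halted without recording any error at all.  Either case is
	 * treated as unrecoverable here.
	 */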
	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	}

	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat->prep_lock);
	ioat3_restart_channel(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}

static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
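	/*
	 * Multiplying by 0x0101010101010101 replicates the low byte across
	 * all eight byte lanes; e.g. value 0xab becomes 0xabababababababab.
	 */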
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; xor_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;
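	/*
	 * A validate operation has no separate destination: the hardware
	 * treats the destination operand as another source to check, so
	 * src[0] is passed as dest and the source list is shifted down by
	 * one.
	 */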
	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc,
		 struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case).
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;
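
	/*
	 * Worked example (illustrative): an 8-source pq over a single
	 * xfercap-sized chunk needs the base descriptor plus one extended
	 * descriptor (with_ext = 1), so num_descs doubles from 1 to 2, and
	 * one more null descriptor is reserved below for the ordered
	 * completion write.
	 */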
	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; pq_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];
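
	/*
	 * The coefficients feed only the GF-multiply (Q) computation; with
	 * DMA_PREP_PQ_DISABLE_Q set below they are don't-care, so an
	 * all-zero scf[] suffices for a plain xor routed through the pq
	 * engine.
	 */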
	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;
	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static void ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;
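
	/*
	 * With IOAT_NUM_SRC_TEST == 6 the xor of bytes 1 << 0 .. 1 << 5 is
	 * 0x3f, so every 32-bit word of the destination page is expected
	 * to read back as 0x3f3f3f3f once the xor completes.
	 */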
	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT |
				      DMA_COMPL_SKIP_SRC_UNMAP |
				      DMA_COMPL_SKIP_DEST_UNMAP);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	op = IOAT_OP_FILL;

	dma_addr = dma_map_page(dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT |
					 DMA_COMPL_SKIP_SRC_UNMAP |
					 DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_FILL)
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}

static int ioat3_irq_reinit(struct ioatdma_device *device)
{
	int msixcnt = device->common.chancnt;
	struct pci_dev *pdev = device->pdev;
	int i;
	struct msix_entry *msix;
	struct ioat_chan_common *chan;
	int err = 0;

	switch (device->irq_mode) {
	case IOAT_MSIX:
		for (i = 0; i < msixcnt; i++) {
			msix = &device->msix_entries[i];
			chan = ioat_chan_by_index(device, i);
			devm_free_irq(&pdev->dev, msix->vector, chan);
		}

		pci_disable_msix(pdev);
		break;

	case IOAT_MSIX_SINGLE:
		msix = &device->msix_entries[0];
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, msix->vector, chan);
		pci_disable_msix(pdev);
		break;

	case IOAT_MSI:
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, pdev->irq, chan);
		pci_disable_msi(pdev);
		break;

	case IOAT_INTX:
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, pdev->irq, chan);
		break;

	default:
		return 0;
	}

	device->irq_mode = IOAT_NOIRQ;

	err = ioat_dma_setup_interrupts(device);

	return err;
}

static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	if (device->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
	if (err) {
		dev_err(&pdev->dev, "Failed to reset!\n");
		return err;
	}

	if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
		err = ioat3_irq_reinit(device);

	return err;
}

int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	if (is_xeon_cb32(pdev))
		dma->copy_align = 6;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	/* dca is incompatible with raid operations */
	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;
		dma->xor_align = 6;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}

	if (cap & IOAT_CAP_PQ) {
		is_raid_device = true;
		dma_set_maxpq(dma, 8, 0);
		if (is_xeon_cb32(pdev))
			dma->pq_align = 6;
		else
			dma->pq_align = 0;

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

		if (!(cap & IOAT_CAP_XOR)) {
			dma->max_xor = 8;
			if (is_xeon_cb32(pdev))
				dma->xor_align = 6;
			else
				dma->xor_align = 0;

			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}

	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}

	dma->device_tx_status = ioat3_tx_status;
	device->cleanup_fn = ioat3_cleanup_event;
	device->timer_fn = ioat3_timer_event;

	if (is_xeon_cb32(pdev)) {
		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = NULL;

		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = NULL;
	}

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;
}