qib_user_sdma.c

/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH	64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH	64
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT	500

struct qib_user_sdma_pkt {
	u8 naddr;		/* dimension of addr (1..3) ... */
	u32 counter;		/* sdma pkts queued counter for this entry */
	u64 added;		/* global descq number of entries */

	struct {
		u32 offset;		/* offset for kvaddr, addr */
		u32 length;		/* length in page */
		u8 put_page;		/* should we put_page? */
		u8 dma_mapped;		/* is page dma_mapped? */
		struct page *page;	/* may be NULL (coherent mem) */
		void *kvaddr;		/* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];		/* max pages, any more and we coalesce */

	struct list_head list;	/* list element */
};

struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};

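/*
 * Allocate and initialize a per-context user SDMA queue: a slab cache
 * for the packet bookkeeping structures and a DMA pool for the
 * fixed-size packet headers.  Returns NULL on any allocation failure.
 */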
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}

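/* fill in one address fragment of a packet */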
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, size_t offset, size_t len,
				    int put_page, int dma_mapped,
				    struct page *page,
				    void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

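/* set up fragment 0 as the packet header; any payload fragments follow it */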
static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
				      u32 counter, size_t offset,
				      size_t len, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				kvaddr, dma_addr);
}

/* we have too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long) iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/*
 * Truncate length to page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen = qib_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					pages[j], kmap(pages[j]), dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}

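/* pin and dma-map the payload pages referenced by each iovec element */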
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}

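/*
 * Add the payload to the packet: coalesce into a single freshly
 * allocated page when the iovecs span more pages than addr[] can
 * describe, otherwise pin the user pages in place.
 */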
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}

/* free every packet on the list, including all of its fragments */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the number of iovec
 * entries consumed.  list must be empty initially, as, if there is
 * an error we clean it...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_user_sdma_queue *pq,
				    struct list_head *list,
				    const struct iovec *iov,
				    unsigned long niov,
				    int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * This assignment is a bit strange.  it's because
		 * the pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					  page, pbc, dma_addr);

		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	int i;

	if (!pq)
		return;

	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

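/*
 * Helpers that build the two 64-bit words of a hardware send DMA
 * descriptor.  desc0 packs the low 32 bits of the buffer address, the
 * generation, the dword count and the buffer offset; desc1 holds the
 * upper address bits.  The first and last descriptors of a packet get
 * additional flag bits set.
 */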
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
					 u64 addr, u64 dwlen, u64 dwoffset)
{
	u8 tmpgen;

	tmpgen = ppd->sdma_generation;

	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((tmpgen & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
	/* last */			/* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}

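/* write the descriptor for one packet fragment at the given tail index */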
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
				    struct qib_user_sdma_pkt *pkt, int idx,
				    unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &ppd->sdma_descq[tail].qw[0];

	descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = qib_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = qib_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = qib_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
				   struct qib_user_sdma_queue *pq,
				   struct list_head *pktlist)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	unsigned long flags;
	u16 tail;
	u8 generation;
	u64 descq_added;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/* keep a copy for restoring purposes in case of problems */
	generation = ppd->sdma_generation;
	descq_added = ppd->sdma_descq_added;

	if (unlikely(!__qib_sdma_running(ppd))) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = ppd->sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct qib_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct qib_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == ppd->sdma_descq_cnt) {
				tail = 0;
				++ppd->sdma_generation;
			}
		}

		if ((ofs << 2) > ppd->ibmaxlen) {
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * If the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs > dd->piosize2kmax_dwords) {
			for (i = 0; i < pkt->naddr; i++) {
				ppd->sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == ppd->sdma_descq_cnt)
					dtail = 0;
			}
		}

		ppd->sdma_descq_added += pkt->naddr;
		pkt->added = ppd->sdma_descq_added;

		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (ppd->sdma_descq_tail != tail)
		dd->f_sdma_update_tail(ppd, tail);

unlock:
	if (unlikely(ret < 0)) {
		ppd->sdma_generation = generation;
		ppd->sdma_descq_added = descq_added;
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

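/*
 * Entry point from the user's writev on the device file: convert the
 * iovecs into SDMA packets, a few at a time, and push them onto the
 * hardware queue.  Returns the number of packets queued, or a negative
 * errno on failure.
 */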
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
			 struct qib_user_sdma_queue *pq,
			 const struct iovec *iov,
			 unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
	if (!qib_sdma_running(ppd))
		goto done_unlock;

	if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
	}

	while (dim) {
		const int mxp = 8;

		down_write(&current->mm->mmap_sem);
		ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		up_write(&current->mm->mmap_sem);

		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * Lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
				qib_user_sdma_hwqueue_clean(ppd);
				qib_user_sdma_queue_clean(ppd, pq);
			}

			ret = qib_user_sdma_push_pkts(ppd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

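/* drive the engine forward and reap completed packets; returns how many were cleaned */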
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	qib_user_sdma_hwqueue_clean(ppd);
	ret = qib_user_sdma_queue_clean(ppd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	return pq ? pq->counter : 0;
}