ntb_transport.c

/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
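
/*
 * Usage note (sketch, not part of the original source): the three knobs
 * above are writable module parameters. Assuming this file is built into a
 * module named "ntb" (the module name is an assumption here), non-default
 * values could be set at load time, e.g.:
 *
 *	modprobe ntb transport_mtu=0x8000 copy_bytes=2048
 */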
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
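
/*
 * Usage sketch (hypothetical client, illustration only): a client registers
 * both a device name and a driver; ntb_match_bus() above binds them by name
 * prefix, so the "demo" driver below would match the "demo0" device created
 * by ntb_register_client_dev(). All demo_* names are assumptions.
 *
 *	static int demo_probe(struct pci_dev *pdev)
 *	{
 *		return 0;	// create queues, allocate state, etc.
 *	}
 *
 *	static void demo_remove(struct pci_dev *pdev)
 *	{
 *		// free queues and state
 *	}
 *
 *	static struct ntb_client demo_client = {
 *		.driver	= { .name = "demo" },
 *		.probe	= demo_probe,
 *		.remove	= demo_remove,
 *	};
 *
 *	rc = ntb_register_client_dev("demo");
 *	if (!rc)
 *		rc = ntb_register_client(&demo_client);
 */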

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}
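
/*
 * Resulting layout of this qp's slice of the memory window (sketch derived
 * from the code above): rx_max_entry frames of rx_max_frame bytes each, with
 * the payload header occupying the tail of every frame, and the
 * remote-visible ntb_rx_info index stored after the last frame:
 *
 *	| payload 0 ... hdr 0 | payload 1 ... hdr 1 | ... | rx_info |
 */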

static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;
		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
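
/*
 * Scratchpad handshake summary (restating the exchange above): each side
 * publishes its parameters in the peer's scratchpad registers and then polls
 * for the peer's values, retrying every NTB_LINK_DOWN_TIMEOUT ms until
 * VERSION, NUM_QPS and NUM_MWS all match:
 *
 *	VERSION			NTB_TRANSPORT_VERSION, must match
 *	QP_LINKS		per-qp "client ready" bitmask
 *	NUM_QPS			number of queue pairs, must match
 *	NUM_MWS			number of memory windows, must match
 *	MWn_SZ_HIGH/LOW		64-bit size of memory window n, in two halves
 */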

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		char debugfs_name[7];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	dma_addr_t src, dest;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	unsigned long flags;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err1;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err1;

	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, dest))
		goto err1;

	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, src))
		goto err2;

	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!txd)
		goto err3;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err3;

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err3:
	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err2:
	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
err1:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);
		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	dma_addr_t src, dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;
	unsigned long flags;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, src))
		goto err;

	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!txd)
		goto err1;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err1;

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;

err1:
	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);

	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device on which the NTB transport resides
 * @handlers: pointer to the receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan)
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	else
		dmaengine_get();

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err3;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err3:
	tasklet_disable(&qp->rx_work);
err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
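
/*
 * Usage sketch (hypothetical client code): creating a queue and wiring up
 * the callbacks defined by struct ntb_queue_handlers in <linux/ntb.h>. The
 * demo_* names are assumptions.
 *
 *	static void demo_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			    void *data, int len)
 *	{
 *		// consume len bytes at data, then repost the buffer
 *		// with ntb_transport_rx_enqueue()
 *	}
 *
 *	static void demo_tx(struct ntb_transport_qp *qp, void *qp_data,
 *			    void *data, int len)
 *	{
 *		// transmit done; the buffer at data may be freed
 *	}
 *
 *	static void demo_event(void *data, int status)
 *	{
 *		// status is NTB_LINK_UP or NTB_LINK_DOWN
 *	}
 *
 *	static const struct ntb_queue_handlers demo_handlers = {
 *		.rx_handler	= demo_rx,
 *		.tx_handler	= demo_tx,
 *		.event_handler	= demo_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &demo_handlers);
 */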

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);

	tasklet_disable(&qp->rx_work);
	cancel_delayed_work_sync(&qp->link_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
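
/*
 * Usage sketch (hypothetical): receive buffers must be posted before the
 * peer can deliver data. A client typically pre-posts a ring of buffers
 * sized by ntb_transport_max_size() and reposts each one from its
 * rx_handler. Error handling is elided here.
 *
 *	len = ntb_transport_max_size(qp);
 *	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 *		buf = kmalloc(len, GFP_KERNEL);
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
 *	}
 *	ntb_transport_link_up(qp);
 */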

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
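
/*
 * Usage sketch (hypothetical): a full transmit ring surfaces here as
 * -EAGAIN (from ntb_process_tx() above), so senders usually pause and retry
 * after their tx_handler runs; any other error means the buffer was not
 * queued.
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
 *	if (rc == -EAGAIN)
 *		; // ring full: stop the queue, retry from tx_handler
 *	else if (rc)
 *		; // drop or requeue the buffer
 */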

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
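
/*
 * Teardown sketch (hypothetical, mirroring the setup order): take the qp
 * link down first, then reclaim any buffers still pending receive, then
 * free the queue.
 *
 *	ntb_transport_link_down(qp);
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		kfree(buf);
 *	ntb_transport_free_queue(qp);
 */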

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
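
/*
 * Worked example (values assumed for illustration): with a 4096-byte
 * tx_max_frame and a 12-byte ntb_payload_header, the CPU path reports
 * 4096 - 12 = 4084 bytes. If a DMA channel with copy_align = 6 (i.e.
 * 1 << 6 = 64-byte alignment) was found, that is rounded down to a
 * multiple of 64: 4084 - (4084 % 64) = 4032 bytes.
 */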