ntb_transport.c

  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2012 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * BSD LICENSE
  14. *
  15. * Copyright(c) 2012 Intel Corporation. All rights reserved.
  16. *
  17. * Redistribution and use in source and binary forms, with or without
  18. * modification, are permitted provided that the following conditions
  19. * are met:
  20. *
  21. * * Redistributions of source code must retain the above copyright
  22. * notice, this list of conditions and the following disclaimer.
  23. * * Redistributions in binary form must reproduce the above copyright
  24. * notice, this list of conditions and the following disclaimer in
  25. * the documentation and/or other materials provided with the
  26. * distribution.
  27. * * Neither the name of Intel Corporation nor the names of its
  28. * contributors may be used to endorse or promote products derived
  29. * from this software without specific prior written permission.
  30. *
  31. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  32. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  33. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  34. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  35. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  36. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  37. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  38. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  39. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  40. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  41. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  42. *
  43. * Intel PCIe NTB Linux driver
  44. *
  45. * Contact Information:
  46. * Jon Mason <jon.mason@intel.com>
  47. */
  48. #include <linux/debugfs.h>
  49. #include <linux/delay.h>
  50. #include <linux/dma-mapping.h>
  51. #include <linux/errno.h>
  52. #include <linux/export.h>
  53. #include <linux/interrupt.h>
  54. #include <linux/module.h>
  55. #include <linux/pci.h>
  56. #include <linux/slab.h>
  57. #include <linux/types.h>
  58. #include <linux/ntb.h>
  59. #include "ntb_hw.h"
  60. #define NTB_TRANSPORT_VERSION 1
  61. static unsigned int transport_mtu = 0x401E;
  62. module_param(transport_mtu, uint, 0644);
  63. MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
  64. static unsigned char max_num_clients = 2;
  65. module_param(max_num_clients, byte, 0644);
  66. MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
  67. struct ntb_queue_entry {
  68. /* ntb_queue list reference */
  69. struct list_head entry;
  70. /* pointers to data to be transferred */
  71. void *cb_data;
  72. void *buf;
  73. unsigned int len;
  74. unsigned int flags;
  75. };
  76. struct ntb_transport_qp {
  77. struct ntb_transport *transport;
  78. struct ntb_device *ndev;
  79. void *cb_data;
  80. bool client_ready;
  81. bool qp_link;
  82. u8 qp_num; /* Only 64 QPs are allowed. 0-63 */
  83. void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
  84. void *data, int len);
  85. struct list_head tx_free_q;
  86. spinlock_t ntb_tx_free_q_lock;
  87. void *tx_mw_begin;
  88. void *tx_mw_end;
  89. void *tx_offset;
  90. unsigned int tx_max_frame;
  91. void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
  92. void *data, int len);
  93. struct tasklet_struct rx_work;
  94. struct list_head rx_pend_q;
  95. struct list_head rx_free_q;
  96. spinlock_t ntb_rx_pend_q_lock;
  97. spinlock_t ntb_rx_free_q_lock;
  98. void *rx_buff_begin;
  99. void *rx_buff_end;
  100. void *rx_offset;
  101. unsigned int rx_max_frame;
  102. void (*event_handler) (void *data, int status);
  103. struct delayed_work link_work;
  104. struct work_struct link_cleanup;
  105. struct dentry *debugfs_dir;
  106. struct dentry *debugfs_stats;
  107. /* Stats */
  108. u64 rx_bytes;
  109. u64 rx_pkts;
  110. u64 rx_ring_empty;
  111. u64 rx_err_no_buf;
  112. u64 rx_err_oflow;
  113. u64 rx_err_ver;
  114. u64 tx_bytes;
  115. u64 tx_pkts;
  116. u64 tx_ring_full;
  117. };
  118. struct ntb_transport_mw {
  119. size_t size;
  120. void *virt_addr;
  121. dma_addr_t dma_addr;
  122. };
  123. struct ntb_transport_client_dev {
  124. struct list_head entry;
  125. struct device dev;
  126. };
  127. struct ntb_transport {
  128. struct list_head entry;
  129. struct list_head client_devs;
  130. struct ntb_device *ndev;
  131. struct ntb_transport_mw mw[NTB_NUM_MW];
  132. struct ntb_transport_qp *qps;
  133. unsigned int max_qps;
  134. unsigned long qp_bitmap;
  135. bool transport_link;
  136. struct delayed_work link_work;
  137. struct work_struct link_cleanup;
  138. struct dentry *debugfs_dir;
  139. };
  140. enum {
  141. DESC_DONE_FLAG = 1 << 0,
  142. LINK_DOWN_FLAG = 1 << 1,
  143. };
  144. struct ntb_payload_header {
  145. u64 ver;
  146. unsigned int len;
  147. unsigned int flags;
  148. };
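/*
 * Frame layout note (derived from the copy routines below): each queue's
 * memory-window slice is carved into fixed-size frames of
 * rx_max_frame/tx_max_frame bytes. The payload starts at the beginning of a
 * frame and the struct ntb_payload_header occupies the frame's final bytes:
 *
 *   |<------------------- *_max_frame ------------------->|
 *   | payload (up to max_frame - sizeof(header)) | header  |
 *
 * The sender sets DESC_DONE_FLAG in hdr->flags only after the payload copy
 * (wmb()), and the receiver clears it once the data has been copied out.
 */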
  149. enum {
  150. VERSION = 0,
  151. MW0_SZ,
  152. MW1_SZ,
  153. NUM_QPS,
  154. QP_LINKS,
  155. MAX_SPAD,
  156. };
  157. #define QP_TO_MW(qp) ((qp) % NTB_NUM_MW)
  158. #define NTB_QP_DEF_NUM_ENTRIES 100
  159. #define NTB_LINK_DOWN_TIMEOUT 10
  160. static int ntb_match_bus(struct device *dev, struct device_driver *drv)
  161. {
  162. return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
  163. }
  164. static int ntb_client_probe(struct device *dev)
  165. {
  166. const struct ntb_client *drv = container_of(dev->driver,
  167. struct ntb_client, driver);
  168. struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
  169. int rc = -EINVAL;
  170. get_device(dev);
  171. if (drv && drv->probe)
  172. rc = drv->probe(pdev);
  173. if (rc)
  174. put_device(dev);
  175. return rc;
  176. }
  177. static int ntb_client_remove(struct device *dev)
  178. {
  179. const struct ntb_client *drv = container_of(dev->driver,
  180. struct ntb_client, driver);
  181. struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
  182. if (drv && drv->remove)
  183. drv->remove(pdev);
  184. put_device(dev);
  185. return 0;
  186. }
  187. struct bus_type ntb_bus_type = {
  188. .name = "ntb_bus",
  189. .match = ntb_match_bus,
  190. .probe = ntb_client_probe,
  191. .remove = ntb_client_remove,
  192. };
  193. static LIST_HEAD(ntb_transport_list);
  194. static int ntb_bus_init(struct ntb_transport *nt)
  195. {
  196. if (list_empty(&ntb_transport_list)) {
  197. int rc = bus_register(&ntb_bus_type);
  198. if (rc)
  199. return rc;
  200. }
  201. list_add(&nt->entry, &ntb_transport_list);
  202. return 0;
  203. }
  204. static void ntb_bus_remove(struct ntb_transport *nt)
  205. {
  206. struct ntb_transport_client_dev *client_dev, *cd;
  207. list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
  208. dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
  209. dev_name(&client_dev->dev));
  210. list_del(&client_dev->entry);
  211. device_unregister(&client_dev->dev);
  212. }
  213. list_del(&nt->entry);
  214. if (list_empty(&ntb_transport_list))
  215. bus_unregister(&ntb_bus_type);
  216. }
  217. static void ntb_client_release(struct device *dev)
  218. {
  219. struct ntb_transport_client_dev *client_dev;
  220. client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
  221. kfree(client_dev);
  222. }
  223. /**
  224. * ntb_unregister_client_dev - Unregister NTB client device
  225. * @device_name: Name of NTB client device
  226. *
  227. * Unregister an NTB client device with the NTB transport layer
  228. */
  229. void ntb_unregister_client_dev(char *device_name)
  230. {
  231. struct ntb_transport_client_dev *client, *cd;
  232. struct ntb_transport *nt;
  233. list_for_each_entry(nt, &ntb_transport_list, entry)
  234. list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
  235. if (!strncmp(dev_name(&client->dev), device_name,
  236. strlen(device_name))) {
  237. list_del(&client->entry);
  238. device_unregister(&client->dev);
  239. }
  240. }
  241. EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
  242. /**
  243. * ntb_register_client_dev - Register NTB client device
  244. * @device_name: Name of NTB client device
  245. *
  246. * Register an NTB client device with the NTB transport layer
  247. */
  248. int ntb_register_client_dev(char *device_name)
  249. {
  250. struct ntb_transport_client_dev *client_dev;
  251. struct ntb_transport *nt;
  252. int rc;
  253. if (list_empty(&ntb_transport_list))
  254. return -ENODEV;
  255. list_for_each_entry(nt, &ntb_transport_list, entry) {
  256. struct device *dev;
  257. client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
  258. GFP_KERNEL);
  259. if (!client_dev) {
  260. rc = -ENOMEM;
  261. goto err;
  262. }
  263. dev = &client_dev->dev;
  264. /* setup and register client devices */
  265. dev_set_name(dev, "%s", device_name);
  266. dev->bus = &ntb_bus_type;
  267. dev->release = ntb_client_release;
  268. dev->parent = &ntb_query_pdev(nt->ndev)->dev;
  269. rc = device_register(dev);
  270. if (rc) {
  271. kfree(client_dev);
  272. goto err;
  273. }
  274. list_add_tail(&client_dev->entry, &nt->client_devs);
  275. }
  276. return 0;
  277. err:
  278. ntb_unregister_client_dev(device_name);
  279. return rc;
  280. }
  281. EXPORT_SYMBOL_GPL(ntb_register_client_dev);
  282. /**
  283. * ntb_register_client - Register NTB client driver
  284. * @drv: NTB client driver to be registered
  285. *
  286. * Register an NTB client driver with the NTB transport layer
  287. *
  288. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  289. */
  290. int ntb_register_client(struct ntb_client *drv)
  291. {
  292. drv->driver.bus = &ntb_bus_type;
  293. if (list_empty(&ntb_transport_list))
  294. return -ENODEV;
  295. return driver_register(&drv->driver);
  296. }
  297. EXPORT_SYMBOL_GPL(ntb_register_client);
  298. /**
  299. * ntb_unregister_client - Unregister NTB client driver
  300. * @drv: NTB client driver to be unregistered
  301. *
  302. * Unregister an NTB client driver with the NTB transport layer
  303. *
  305. * RETURNS: None.
  305. */
  306. void ntb_unregister_client(struct ntb_client *drv)
  307. {
  308. driver_unregister(&drv->driver);
  309. }
  310. EXPORT_SYMBOL_GPL(ntb_unregister_client);
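/*
 * Usage sketch, not part of this driver: a client (e.g. a network driver on
 * top of the transport) registers a named client device and a driver on
 * ntb_bus_type. The callback names below are hypothetical; the shape of
 * struct ntb_client is assumed from its use in ntb_client_probe() and
 * ntb_client_remove() above. Note that ntb_match_bus() matches on the device
 * name prefix, so the client device should be registered under the driver's
 * name.
 *
 *	static int hypo_probe(struct pci_dev *pdev)
 *	{
 *		... create queues against pdev here ...
 *		return 0;
 *	}
 *
 *	static void hypo_remove(struct pci_dev *pdev)
 *	{
 *		... tear down queues here ...
 *	}
 *
 *	static struct ntb_client hypo_client = {
 *		.driver.name	= "ntb_hypo",
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= hypo_probe,
 *		.remove		= hypo_remove,
 *	};
 *
 *	static int __init hypo_init(void)
 *	{
 *		int rc = ntb_register_client_dev("ntb_hypo");
 *		if (rc)
 *			return rc;
 *		rc = ntb_register_client(&hypo_client);
 *		if (rc)
 *			ntb_unregister_client_dev("ntb_hypo");
 *		return rc;
 *	}
 */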
  311. static int debugfs_open(struct inode *inode, struct file *filp)
  312. {
  313. filp->private_data = inode->i_private;
  314. return 0;
  315. }
  316. static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
  317. loff_t *offp)
  318. {
  319. struct ntb_transport_qp *qp;
  320. char buf[1024];
  321. ssize_t ret, out_offset, out_count;
  322. out_count = 1024;
  323. qp = filp->private_data;
  324. out_offset = 0;
  325. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  326. "NTB QP stats\n");
  327. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  328. "rx_bytes - \t%llu\n", qp->rx_bytes);
  329. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  330. "rx_pkts - \t%llu\n", qp->rx_pkts);
  331. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  332. "rx_ring_empty - %llu\n", qp->rx_ring_empty);
  333. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  334. "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
  335. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  336. "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
  337. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  338. "rx_err_ver - \t%llu\n", qp->rx_err_ver);
  339. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  340. "rx_buff_begin - %p\n", qp->rx_buff_begin);
  341. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  342. "rx_offset - \t%p\n", qp->rx_offset);
  343. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  344. "rx_buff_end - \t%p\n", qp->rx_buff_end);
  345. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  346. "tx_bytes - \t%llu\n", qp->tx_bytes);
  347. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  348. "tx_pkts - \t%llu\n", qp->tx_pkts);
  349. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  350. "tx_ring_full - \t%llu\n", qp->tx_ring_full);
  351. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  352. "tx_mw_begin - \t%p\n", qp->tx_mw_begin);
  353. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  354. "tx_offset - \t%p\n", qp->tx_offset);
  355. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  356. "tx_mw_end - \t%p\n", qp->tx_mw_end);
  357. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  358. "QP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
  359. "Up" : "Down");
  360. ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
  361. return ret;
  362. }
  363. static const struct file_operations ntb_qp_debugfs_stats = {
  364. .owner = THIS_MODULE,
  365. .open = debugfs_open,
  366. .read = debugfs_read,
  367. };
  368. static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
  369. struct list_head *list)
  370. {
  371. unsigned long flags;
  372. spin_lock_irqsave(lock, flags);
  373. list_add_tail(entry, list);
  374. spin_unlock_irqrestore(lock, flags);
  375. }
  376. static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
  377. struct list_head *list)
  378. {
  379. struct ntb_queue_entry *entry;
  380. unsigned long flags;
  381. spin_lock_irqsave(lock, flags);
  382. if (list_empty(list)) {
  383. entry = NULL;
  384. goto out;
  385. }
  386. entry = list_first_entry(list, struct ntb_queue_entry, entry);
  387. list_del(&entry->entry);
  388. out:
  389. spin_unlock_irqrestore(lock, flags);
  390. return entry;
  391. }
  392. static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
  393. unsigned int qp_num)
  394. {
  395. struct ntb_transport_qp *qp = &nt->qps[qp_num];
  396. unsigned int rx_size, num_qps_mw;
  397. u8 mw_num = QP_TO_MW(qp_num);
  398. void *offset;
  399. WARN_ON(!nt->mw[mw_num].virt_addr);
  400. if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
  401. num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
  402. else
  403. num_qps_mw = nt->max_qps / NTB_NUM_MW;
  404. rx_size = nt->mw[mw_num].size / num_qps_mw;
  405. qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
  406. (qp_num / NTB_NUM_MW * rx_size);
  407. qp->rx_buff_end = qp->rx_buff_begin + rx_size;
  408. qp->rx_offset = qp->rx_buff_begin;
  409. qp->rx_max_frame = min(transport_mtu, rx_size);
  410. /* setup the hdr offsets with 0's */
  411. for (offset = qp->rx_buff_begin + qp->rx_max_frame -
  412. sizeof(struct ntb_payload_header);
  413. offset < qp->rx_buff_end; offset += qp->rx_max_frame)
  414. memset(offset, 0, sizeof(struct ntb_payload_header));
  415. qp->rx_pkts = 0;
  416. qp->tx_pkts = 0;
  417. }
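/*
 * Worked example of the split above: with NTB_NUM_MW == 2 and max_qps == 5,
 * QP_TO_MW() places qps 0, 2 and 4 on MW0 and qps 1 and 3 on MW1. MW0
 * therefore serves three queues (5 / 2 + 1) and MW1 two (5 / 2), and each
 * queue's receive ring is its memory window size divided by that per-MW
 * queue count, with individual frames capped at transport_mtu.
 */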
  418. static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
  419. {
  420. struct ntb_transport_mw *mw = &nt->mw[num_mw];
  421. struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
  422. /* Alloc memory for receiving data. Must be 4k aligned */
  423. mw->size = ALIGN(size, 4096);
  424. mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
  425. GFP_KERNEL);
  426. if (!mw->virt_addr) {
  427. dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
  428. (int) mw->size);
  429. return -ENOMEM;
  430. }
  431. /* Notify HW the memory location of the receive buffer */
  432. ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
  433. return 0;
  434. }
  435. static void ntb_qp_link_cleanup(struct work_struct *work)
  436. {
  437. struct ntb_transport_qp *qp = container_of(work,
  438. struct ntb_transport_qp,
  439. link_cleanup);
  440. struct ntb_transport *nt = qp->transport;
  441. struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
  442. if (qp->qp_link == NTB_LINK_DOWN) {
  443. cancel_delayed_work_sync(&qp->link_work);
  444. return;
  445. }
  446. if (qp->event_handler)
  447. qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
  448. dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
  449. qp->qp_link = NTB_LINK_DOWN;
  450. if (nt->transport_link == NTB_LINK_UP)
  451. schedule_delayed_work(&qp->link_work,
  452. msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
  453. }
  454. static void ntb_qp_link_down(struct ntb_transport_qp *qp)
  455. {
  456. schedule_work(&qp->link_cleanup);
  457. }
  458. static void ntb_transport_link_cleanup(struct work_struct *work)
  459. {
  460. struct ntb_transport *nt = container_of(work, struct ntb_transport,
  461. link_cleanup);
  462. int i;
  463. if (nt->transport_link == NTB_LINK_DOWN)
  464. cancel_delayed_work_sync(&nt->link_work);
  465. else
  466. nt->transport_link = NTB_LINK_DOWN;
  467. /* Pass along the info to any clients */
  468. for (i = 0; i < nt->max_qps; i++)
  469. if (!test_bit(i, &nt->qp_bitmap))
  470. ntb_qp_link_down(&nt->qps[i]);
  471. /* The scratchpad registers keep the values if the remote side
  472. * goes down, blast them now to give them a sane value the next
  473. * time they are accessed
  474. */
  475. for (i = 0; i < MAX_SPAD; i++)
  476. ntb_write_local_spad(nt->ndev, i, 0);
  477. }
  478. static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
  479. {
  480. struct ntb_transport *nt = data;
  481. switch (event) {
  482. case NTB_EVENT_HW_LINK_UP:
  483. schedule_delayed_work(&nt->link_work, 0);
  484. break;
  485. case NTB_EVENT_HW_LINK_DOWN:
  486. schedule_work(&nt->link_cleanup);
  487. break;
  488. default:
  489. BUG();
  490. }
  491. }
  492. static void ntb_transport_link_work(struct work_struct *work)
  493. {
  494. struct ntb_transport *nt = container_of(work, struct ntb_transport,
  495. link_work.work);
  496. struct ntb_device *ndev = nt->ndev;
  497. struct pci_dev *pdev = ntb_query_pdev(ndev);
  498. u32 val;
  499. int rc, i;
  500. /* send the local info */
  501. rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
  502. if (rc) {
  503. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  504. 0, VERSION);
  505. goto out;
  506. }
  507. rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
  508. if (rc) {
  509. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  510. (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
  511. goto out;
  512. }
  513. rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
  514. if (rc) {
  515. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  516. (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
  517. goto out;
  518. }
  519. rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
  520. if (rc) {
  521. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  522. nt->max_qps, NUM_QPS);
  523. goto out;
  524. }
  525. rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
  526. if (rc) {
  527. dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
  528. goto out;
  529. }
  530. rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
  531. if (rc) {
  532. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  533. val, QP_LINKS);
  534. goto out;
  535. }
  536. /* Query the remote side for its info */
  537. rc = ntb_read_remote_spad(ndev, VERSION, &val);
  538. if (rc) {
  539. dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
  540. goto out;
  541. }
  542. if (val != NTB_TRANSPORT_VERSION)
  543. goto out;
  544. dev_dbg(&pdev->dev, "Remote version = %d\n", val);
  545. rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
  546. if (rc) {
  547. dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
  548. goto out;
  549. }
  550. if (val != nt->max_qps)
  551. goto out;
  552. dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
  553. rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
  554. if (rc) {
  555. dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
  556. goto out;
  557. }
  558. if (!val)
  559. goto out;
  560. dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
  561. rc = ntb_set_mw(nt, 0, val);
  562. if (rc)
  563. goto out;
  564. rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
  565. if (rc) {
  566. dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
  567. goto out;
  568. }
  569. if (!val)
  570. goto out;
  571. dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
  572. rc = ntb_set_mw(nt, 1, val);
  573. if (rc)
  574. goto out;
  575. nt->transport_link = NTB_LINK_UP;
  576. for (i = 0; i < nt->max_qps; i++) {
  577. struct ntb_transport_qp *qp = &nt->qps[i];
  578. ntb_transport_setup_qp_mw(nt, i);
  579. if (qp->client_ready == NTB_LINK_UP)
  580. schedule_delayed_work(&qp->link_work, 0);
  581. }
  582. return;
  583. out:
  584. if (ntb_hw_link_status(ndev))
  585. schedule_delayed_work(&nt->link_work,
  586. msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
  587. }
  588. static void ntb_qp_link_work(struct work_struct *work)
  589. {
  590. struct ntb_transport_qp *qp = container_of(work,
  591. struct ntb_transport_qp,
  592. link_work.work);
  593. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  594. struct ntb_transport *nt = qp->transport;
  595. int rc, val;
  596. WARN_ON(nt->transport_link != NTB_LINK_UP);
  597. rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
  598. if (rc) {
  599. dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
  600. return;
  601. }
  602. rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
  603. if (rc)
  604. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  605. val | 1 << qp->qp_num, QP_LINKS);
  606. /* query remote spad for qp ready bits */
  607. rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
  608. if (rc)
  609. dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
  610. dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
  611. /* See if the remote side is up */
  612. if (1 << qp->qp_num & val) {
  613. qp->qp_link = NTB_LINK_UP;
  614. dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
  615. if (qp->event_handler)
  616. qp->event_handler(qp->cb_data, NTB_LINK_UP);
  617. } else if (nt->transport_link == NTB_LINK_UP)
  618. schedule_delayed_work(&qp->link_work,
  619. msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
  620. }
  621. static void ntb_transport_init_queue(struct ntb_transport *nt,
  622. unsigned int qp_num)
  623. {
  624. struct ntb_transport_qp *qp;
  625. unsigned int num_qps_mw, tx_size;
  626. u8 mw_num = QP_TO_MW(qp_num);
  627. qp = &nt->qps[qp_num];
  628. qp->qp_num = qp_num;
  629. qp->transport = nt;
  630. qp->ndev = nt->ndev;
  631. qp->qp_link = NTB_LINK_DOWN;
  632. qp->client_ready = NTB_LINK_DOWN;
  633. qp->event_handler = NULL;
  634. if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
  635. num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
  636. else
  637. num_qps_mw = nt->max_qps / NTB_NUM_MW;
  638. tx_size = ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
  639. qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
  640. (qp_num / NTB_NUM_MW * tx_size);
  641. qp->tx_mw_end = qp->tx_mw_begin + tx_size;
  642. qp->tx_offset = qp->tx_mw_begin;
  643. qp->tx_max_frame = min(transport_mtu, tx_size);
  644. if (nt->debugfs_dir) {
  645. char debugfs_name[7];
  646. snprintf(debugfs_name, 7, "qp%d", qp_num);
  647. qp->debugfs_dir = debugfs_create_dir(debugfs_name,
  648. nt->debugfs_dir);
  649. qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
  650. qp->debugfs_dir, qp,
  651. &ntb_qp_debugfs_stats);
  652. }
  653. INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
  654. INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
  655. spin_lock_init(&qp->ntb_rx_pend_q_lock);
  656. spin_lock_init(&qp->ntb_rx_free_q_lock);
  657. spin_lock_init(&qp->ntb_tx_free_q_lock);
  658. INIT_LIST_HEAD(&qp->rx_pend_q);
  659. INIT_LIST_HEAD(&qp->rx_free_q);
  660. INIT_LIST_HEAD(&qp->tx_free_q);
  661. }
  662. int ntb_transport_init(struct pci_dev *pdev)
  663. {
  664. struct ntb_transport *nt;
  665. int rc, i;
  666. nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
  667. if (!nt)
  668. return -ENOMEM;
  669. if (debugfs_initialized())
  670. nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
  671. else
  672. nt->debugfs_dir = NULL;
  673. nt->ndev = ntb_register_transport(pdev, nt);
  674. if (!nt->ndev) {
  675. rc = -EIO;
  676. goto err;
  677. }
  678. nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
  679. nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
  680. GFP_KERNEL);
  681. if (!nt->qps) {
  682. rc = -ENOMEM;
  683. goto err1;
  684. }
  685. nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
  686. for (i = 0; i < nt->max_qps; i++)
  687. ntb_transport_init_queue(nt, i);
  688. INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
  689. INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
  690. rc = ntb_register_event_callback(nt->ndev,
  691. ntb_transport_event_callback);
  692. if (rc)
  693. goto err2;
  694. INIT_LIST_HEAD(&nt->client_devs);
  695. rc = ntb_bus_init(nt);
  696. if (rc)
  697. goto err3;
  698. if (ntb_hw_link_status(nt->ndev))
  699. schedule_delayed_work(&nt->link_work, 0);
  700. return 0;
  701. err3:
  702. ntb_unregister_event_callback(nt->ndev);
  703. err2:
  704. kfree(nt->qps);
  705. err1:
  706. ntb_unregister_transport(nt->ndev);
  707. err:
  708. debugfs_remove_recursive(nt->debugfs_dir);
  709. kfree(nt);
  710. return rc;
  711. }
  712. void ntb_transport_free(void *transport)
  713. {
  714. struct ntb_transport *nt = transport;
  715. struct pci_dev *pdev;
  716. int i;
  717. nt->transport_link = NTB_LINK_DOWN;
  718. /* verify that all the qp's are freed */
  719. for (i = 0; i < nt->max_qps; i++)
  720. if (!test_bit(i, &nt->qp_bitmap))
  721. ntb_transport_free_queue(&nt->qps[i]);
  722. ntb_bus_remove(nt);
  723. cancel_delayed_work_sync(&nt->link_work);
  724. debugfs_remove_recursive(nt->debugfs_dir);
  725. ntb_unregister_event_callback(nt->ndev);
  726. pdev = ntb_query_pdev(nt->ndev);
  727. for (i = 0; i < NTB_NUM_MW; i++)
  728. if (nt->mw[i].virt_addr)
  729. dma_free_coherent(&pdev->dev, nt->mw[i].size,
  730. nt->mw[i].virt_addr,
  731. nt->mw[i].dma_addr);
  732. kfree(nt->qps);
  733. ntb_unregister_transport(nt->ndev);
  734. kfree(nt);
  735. }
  736. static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
  737. struct ntb_queue_entry *entry, void *offset)
  738. {
  739. struct ntb_payload_header *hdr;
  740. BUG_ON(offset < qp->rx_buff_begin ||
  741. offset + qp->rx_max_frame >= qp->rx_buff_end);
  742. hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
  743. entry->len = hdr->len;
  744. memcpy(entry->buf, offset, entry->len);
  745. /* Ensure that the data is fully copied out before clearing the flag */
  746. wmb();
  747. hdr->flags = 0;
  748. if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
  749. qp->rx_handler(qp, qp->cb_data, entry->cb_data, entry->len);
  750. ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
  751. }
  752. static int ntb_process_rxc(struct ntb_transport_qp *qp)
  753. {
  754. struct ntb_payload_header *hdr;
  755. struct ntb_queue_entry *entry;
  756. void *offset;
  757. offset = qp->rx_offset;
  758. entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
  759. if (!entry) {
  760. hdr = offset + qp->rx_max_frame -
  761. sizeof(struct ntb_payload_header);
  762. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  763. "no buffer - HDR ver %llu, len %d, flags %x\n",
  764. hdr->ver, hdr->len, hdr->flags);
  765. qp->rx_err_no_buf++;
  766. return -ENOMEM;
  767. }
  768. hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
  769. if (!(hdr->flags & DESC_DONE_FLAG)) {
  770. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  771. &qp->rx_pend_q);
  772. qp->rx_ring_empty++;
  773. return -EAGAIN;
  774. }
  775. if (hdr->ver != qp->rx_pkts) {
  776. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  777. "qp %d: version mismatch, expected %llu - got %llu\n",
  778. qp->qp_num, qp->rx_pkts, hdr->ver);
  779. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  780. &qp->rx_pend_q);
  781. qp->rx_err_ver++;
  782. return -EIO;
  783. }
  784. if (hdr->flags & LINK_DOWN_FLAG) {
  785. ntb_qp_link_down(qp);
  786. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  787. &qp->rx_pend_q);
  788. /* Ensure that the data is fully copied out before clearing the
  789. * done flag
  790. */
  791. wmb();
  792. hdr->flags = 0;
  793. goto out;
  794. }
  795. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  796. "rx offset %p, ver %llu - %d payload received, buf size %d\n",
  797. qp->rx_offset, hdr->ver, hdr->len, entry->len);
  798. if (hdr->len <= entry->len)
  799. ntb_rx_copy_task(qp, entry, offset);
  800. else {
  801. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  802. &qp->rx_pend_q);
  803. /* Ensure that the data is fully copied out before clearing the
  804. * done flag
  805. */
  806. wmb();
  807. hdr->flags = 0;
  808. qp->rx_err_oflow++;
  809. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  810. "RX overflow! Wanted %d got %d\n",
  811. hdr->len, entry->len);
  812. }
  813. qp->rx_bytes += hdr->len;
  814. qp->rx_pkts++;
  815. out:
  816. qp->rx_offset += qp->rx_max_frame;
  817. if (qp->rx_offset + qp->rx_max_frame >= qp->rx_buff_end)
  818. qp->rx_offset = qp->rx_buff_begin;
  819. return 0;
  820. }
  821. static void ntb_transport_rx(unsigned long data)
  822. {
  823. struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
  824. int rc;
  825. do {
  826. rc = ntb_process_rxc(qp);
  827. } while (!rc);
  828. }
  829. static void ntb_transport_rxc_db(void *data, int db_num)
  830. {
  831. struct ntb_transport_qp *qp = data;
  832. dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
  833. __func__, db_num);
  834. tasklet_schedule(&qp->rx_work);
  835. }
  836. static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
  837. struct ntb_queue_entry *entry,
  838. void *offset)
  839. {
  840. struct ntb_payload_header *hdr;
  841. BUG_ON(offset < qp->tx_mw_begin ||
  842. offset + qp->tx_max_frame >= qp->tx_mw_end);
  843. memcpy_toio(offset, entry->buf, entry->len);
  844. hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
  845. hdr->len = entry->len;
  846. hdr->ver = qp->tx_pkts;
  847. /* Ensure that the data is fully copied out before setting the flag */
  848. wmb();
  849. hdr->flags = entry->flags | DESC_DONE_FLAG;
  850. ntb_ring_sdb(qp->ndev, qp->qp_num);
  851. /* The entry length can only be zero if the packet is intended to be a
  852. * "link down" or similar. Since no payload is being sent in these
  853. * cases, there is nothing to add to the completion queue.
  854. */
  855. if (entry->len > 0) {
  856. qp->tx_bytes += entry->len;
  857. if (qp->tx_handler)
  858. qp->tx_handler(qp, qp->cb_data, entry->cb_data,
  859. entry->len);
  860. }
  861. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
  862. }
  863. static int ntb_process_tx(struct ntb_transport_qp *qp,
  864. struct ntb_queue_entry *entry)
  865. {
  866. struct ntb_payload_header *hdr;
  867. void *offset;
  868. offset = qp->tx_offset;
  869. hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
  870. dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %p, entry len %d flags %x buff %p\n",
  871. qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
  872. entry->buf);
  873. if (hdr->flags) {
  874. qp->tx_ring_full++;
  875. return -EAGAIN;
  876. }
  877. if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
  878. if (qp->tx_handler)
  879. qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
  880. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
  881. &qp->tx_free_q);
  882. return 0;
  883. }
  884. ntb_tx_copy_task(qp, entry, offset);
  885. qp->tx_offset += qp->tx_max_frame;
  886. if (qp->tx_offset + qp->tx_max_frame >= qp->tx_mw_end)
  887. qp->tx_offset = qp->tx_mw_begin;
  888. qp->tx_pkts++;
  889. return 0;
  890. }
  891. static void ntb_send_link_down(struct ntb_transport_qp *qp)
  892. {
  893. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  894. struct ntb_queue_entry *entry;
  895. int i, rc;
  896. if (qp->qp_link == NTB_LINK_DOWN)
  897. return;
  898. qp->qp_link = NTB_LINK_DOWN;
  899. dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
  900. for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
  901. entry = ntb_list_rm(&qp->ntb_tx_free_q_lock,
  902. &qp->tx_free_q);
  903. if (entry)
  904. break;
  905. msleep(100);
  906. }
  907. if (!entry)
  908. return;
  909. entry->cb_data = NULL;
  910. entry->buf = NULL;
  911. entry->len = 0;
  912. entry->flags = LINK_DOWN_FLAG;
  913. rc = ntb_process_tx(qp, entry);
  914. if (rc)
  915. dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
  916. qp->qp_num);
  917. }
  918. /**
  919. * ntb_transport_create_queue - Create a new NTB transport layer queue
  920. * @data: pointer to client private data, passed back in the queue callbacks
  921. * @pdev: PCI device on which the transport and queue reside
  922. * @handlers: receive, transmit, and event callback functions
  923. *
  924. * Create a new NTB transport layer queue and provide the queue with a callback
  925. * routine for both transmit and receive. The receive callback routine will be
  926. * used to pass up data when the transport has received it on the queue. The
  927. * transmit callback routine will be called when the transport has completed the
  928. * transmission of the data on the queue and the data is ready to be freed.
  929. *
  930. * RETURNS: pointer to newly created ntb_queue, NULL on error.
  931. */
  932. struct ntb_transport_qp *
  933. ntb_transport_create_queue(void *data, struct pci_dev *pdev,
  934. const struct ntb_queue_handlers *handlers)
  935. {
  936. struct ntb_queue_entry *entry;
  937. struct ntb_transport_qp *qp;
  938. struct ntb_transport *nt;
  939. unsigned int free_queue;
  940. int rc, i;
  941. nt = ntb_find_transport(pdev);
  942. if (!nt)
  943. goto err;
  944. free_queue = ffs(nt->qp_bitmap);
  945. if (!free_queue)
  946. goto err;
  947. /* decrement free_queue to make it zero based */
  948. free_queue--;
  949. clear_bit(free_queue, &nt->qp_bitmap);
  950. qp = &nt->qps[free_queue];
  951. qp->cb_data = data;
  952. qp->rx_handler = handlers->rx_handler;
  953. qp->tx_handler = handlers->tx_handler;
  954. qp->event_handler = handlers->event_handler;
  955. for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
  956. entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
  957. if (!entry)
  958. goto err1;
  959. ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
  960. &qp->rx_free_q);
  961. }
  962. for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
  963. entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
  964. if (!entry)
  965. goto err2;
  966. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
  967. &qp->tx_free_q);
  968. }
  969. tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
  970. rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
  971. ntb_transport_rxc_db);
  972. if (rc)
  973. goto err3;
  974. dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
  975. return qp;
  976. err3:
  977. tasklet_disable(&qp->rx_work);
  978. err2:
  979. while ((entry =
  980. ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
  981. kfree(entry);
  982. err1:
  983. while ((entry =
  984. ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
  985. kfree(entry);
  986. set_bit(free_queue, &nt->qp_bitmap);
  987. err:
  988. return NULL;
  989. }
  990. EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
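/*
 * Usage sketch, not part of this driver (callback and buffer names are
 * hypothetical): a client typically creates its queue from probe(), posts
 * receive buffers, and then marks itself ready. The handler signatures match
 * the function pointers declared in struct ntb_transport_qp above.
 *
 *	static void hypo_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			    void *data, int len)
 *	{
 *		... consume len bytes, then repost the buffer ...
 *		ntb_transport_rx_enqueue(qp, data, data, RX_BUF_SIZE);
 *	}
 *
 *	static void hypo_tx(struct ntb_transport_qp *qp, void *qp_data,
 *			    void *data, int len)
 *	{
 *		... payload has been copied to the MW; recycle the buffer ...
 *	}
 *
 *	static void hypo_event(void *data, int status)
 *	{
 *		... status is NTB_LINK_UP or NTB_LINK_DOWN ...
 *	}
 *
 *	static const struct ntb_queue_handlers hypo_handlers = {
 *		.rx_handler	= hypo_rx,
 *		.tx_handler	= hypo_tx,
 *		.event_handler	= hypo_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &hypo_handlers);
 *	for (i = 0; i < NUM_RX_BUFS; i++)
 *		ntb_transport_rx_enqueue(qp, bufs[i], bufs[i], RX_BUF_SIZE);
 *	ntb_transport_link_up(qp);
 */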
  991. /**
  992. * ntb_transport_free_queue - Frees NTB transport queue
  993. * @qp: NTB queue to be freed
  994. *
  995. * Frees NTB transport queue
  996. */
  997. void ntb_transport_free_queue(struct ntb_transport_qp *qp)
  998. {
  999. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  1000. struct ntb_queue_entry *entry;
  1001. if (!qp)
  1002. return;
  1003. cancel_delayed_work_sync(&qp->link_work);
  1004. ntb_unregister_db_callback(qp->ndev, qp->qp_num);
  1005. tasklet_disable(&qp->rx_work);
  1006. while ((entry =
  1007. ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
  1008. kfree(entry);
  1009. while ((entry =
  1010. ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
  1011. dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
  1012. kfree(entry);
  1013. }
  1014. while ((entry =
  1015. ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
  1016. kfree(entry);
  1017. set_bit(qp->qp_num, &qp->transport->qp_bitmap);
  1018. dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
  1019. }
  1020. EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
  1021. /**
  1022. * ntb_transport_rx_remove - Dequeues enqueued rx packet
  1023. * @qp: NTB queue to be freed
  1024. * @len: pointer to variable to write enqueued buffers length
  1025. *
  1026. * Dequeues unused buffers from receive queue. Should only be used during
  1027. * shutdown of qp.
  1028. *
  1029. * RETURNS: a pointer to the dequeued buffer on success, or NULL on error.
  1030. */
  1031. void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
  1032. {
  1033. struct ntb_queue_entry *entry;
  1034. void *buf;
  1035. if (!qp || qp->client_ready == NTB_LINK_UP)
  1036. return NULL;
  1037. entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
  1038. if (!entry)
  1039. return NULL;
  1040. buf = entry->cb_data;
  1041. *len = entry->len;
  1042. ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
  1043. &qp->rx_free_q);
  1044. return buf;
  1045. }
  1046. EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
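/*
 * Teardown sketch (illustrative only): on client shutdown the usual order is
 * to take the queue link down, drain any receive buffers still pending so the
 * client can free them, and only then free the queue.
 *
 *	ntb_transport_link_down(qp);
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		... free the client-owned buffer ...
 *	ntb_transport_free_queue(qp);
 */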
  1047. /**
  1048. * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
  1049. * @qp: NTB transport layer queue the entry is to be enqueued on
  1050. * @cb: per buffer pointer for callback function to use
  1051. * @data: pointer to data buffer that incoming packets will be copied into
  1052. * @len: length of the data buffer
  1053. *
  1054. * Enqueue a new receive buffer onto the transport queue, into which an
  1055. * NTB payload can be received.
  1056. *
  1057. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  1058. */
  1059. int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
  1060. unsigned int len)
  1061. {
  1062. struct ntb_queue_entry *entry;
  1063. if (!qp)
  1064. return -EINVAL;
  1065. entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
  1066. if (!entry)
  1067. return -ENOMEM;
  1068. entry->cb_data = cb;
  1069. entry->buf = data;
  1070. entry->len = len;
  1071. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  1072. &qp->rx_pend_q);
  1073. return 0;
  1074. }
  1075. EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
  1076. /**
  1077. * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
  1078. * @qp: NTB transport layer queue the entry is to be enqueued on
  1079. * @cb: per buffer pointer for callback function to use
  1080. * @data: pointer to data buffer that will be sent
  1081. * @len: length of the data buffer
  1082. *
  1083. * Enqueue a new transmit buffer onto the transport queue from which an NTB
  1084. * payload will be transmitted. This assumes that a lock is being held to
  1085. * serialize access to the qp.
  1086. *
  1087. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  1088. */
  1089. int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
  1090. unsigned int len)
  1091. {
  1092. struct ntb_queue_entry *entry;
  1093. int rc;
  1094. if (!qp || qp->qp_link != NTB_LINK_UP || !len)
  1095. return -EINVAL;
  1096. entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
  1097. if (!entry)
  1098. return -ENOMEM;
  1099. entry->cb_data = cb;
  1100. entry->buf = data;
  1101. entry->len = len;
  1102. entry->flags = 0;
  1103. rc = ntb_process_tx(qp, entry);
  1104. if (rc)
  1105. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
  1106. &qp->tx_free_q);
  1107. return rc;
  1108. }
  1109. EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
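/*
 * Send-path sketch (illustrative; the caller policy shown is hypothetical):
 * payloads larger than ntb_transport_max_size(qp) are dropped and reported
 * through the tx_handler with len == -EIO, while a full ring surfaces here as
 * -EAGAIN, so callers usually stop queueing and retry from their tx
 * completion handler.
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *	rc = ntb_transport_tx_enqueue(qp, my_ctx, my_buf, len);
 *	if (rc == -EAGAIN)
 *		... ring full: pause the caller's queue and retry later ...
 */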
  1110. /**
  1111. * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
  1112. * @qp: NTB transport layer queue to be enabled
  1113. *
  1114. * Notify NTB transport layer of client readiness to use queue
  1115. */
  1116. void ntb_transport_link_up(struct ntb_transport_qp *qp)
  1117. {
  1118. if (!qp)
  1119. return;
  1120. qp->client_ready = NTB_LINK_UP;
  1121. if (qp->transport->transport_link == NTB_LINK_UP)
  1122. schedule_delayed_work(&qp->link_work, 0);
  1123. }
  1124. EXPORT_SYMBOL_GPL(ntb_transport_link_up);
  1125. /**
  1126. * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
  1127. * @qp: NTB transport layer queue to be disabled
  1128. *
  1129. * Notify the NTB transport layer of the client's desire to no longer receive
  1130. * data on the specified transport queue. It is the client's responsibility to
  1131. * ensure all entries on the queue are purged or otherwise handled appropriately.
  1132. */
  1133. void ntb_transport_link_down(struct ntb_transport_qp *qp)
  1134. {
  1135. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  1136. int rc, val;
  1137. if (!qp)
  1138. return;
  1139. qp->client_ready = NTB_LINK_DOWN;
  1140. rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
  1141. if (rc) {
  1142. dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
  1143. return;
  1144. }
  1145. rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
  1146. val & ~(1 << qp->qp_num));
  1147. if (rc)
  1148. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  1149. val & ~(1 << qp->qp_num), QP_LINKS);
  1150. if (qp->qp_link == NTB_LINK_UP)
  1151. ntb_send_link_down(qp);
  1152. else
  1153. cancel_delayed_work_sync(&qp->link_work);
  1154. }
  1155. EXPORT_SYMBOL_GPL(ntb_transport_link_down);
  1156. /**
  1157. * ntb_transport_link_query - Query transport link state
  1158. * @qp: NTB transport layer queue to be queried
  1159. *
  1160. * Query connectivity to the remote system of the NTB transport queue
  1161. *
  1162. * RETURNS: true for link up or false for link down
  1163. */
  1164. bool ntb_transport_link_query(struct ntb_transport_qp *qp)
  1165. {
  1166. return qp->qp_link == NTB_LINK_UP;
  1167. }
  1168. EXPORT_SYMBOL_GPL(ntb_transport_link_query);
  1169. /**
  1170. * ntb_transport_qp_num - Query the qp number
  1171. * @qp: NTB transport layer queue to be queried
  1172. *
  1173. * Query qp number of the NTB transport queue
  1174. *
  1175. * RETURNS: a zero based number specifying the qp number
  1176. */
  1177. unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
  1178. {
  1179. return qp->qp_num;
  1180. }
  1181. EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  1182. /**
  1183. * ntb_transport_max_size - Query the max payload size of a qp
  1184. * @qp: NTB transport layer queue to be queried
  1185. *
  1186. * Query the maximum payload size permissible on the given qp
  1187. *
  1188. * RETURNS: the max payload size of a qp
  1189. */
  1190. unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
  1191. {
  1192. return qp->tx_max_frame - sizeof(struct ntb_payload_header);
  1193. }
  1194. EXPORT_SYMBOL_GPL(ntb_transport_max_size);