/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients = 2;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed, 0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw mw[NTB_NUM_MW];
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
	struct dentry *debugfs_dir;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
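
/*
 * QP-to-MW mapping sketch (illustrative only; it assumes NTB_NUM_MW == 2,
 * as the MW0/MW1 scratchpad pairs above suggest).  Queues are striped
 * across the memory windows round-robin:
 *
 *	QP_TO_MW(0) == 0, QP_TO_MW(1) == 1, QP_TO_MW(2) == 0, ...
 *
 * Each memory window is then divided evenly among the QPs that map to it;
 * see ntb_transport_setup_qp_mw() and ntb_transport_init_queue() below.
 */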

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s", device_name);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
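
/*
 * A minimal client-registration sketch (illustrative only; the callback
 * bodies and the "ntb_example" name are hypothetical).  ntb_client_probe()
 * above hands the callbacks the parent struct pci_dev, and ntb_match_bus()
 * matches devices to drivers by name prefix:
 *
 *	static int example_probe(struct pci_dev *pdev)
 *	{
 *		// typically: ntb_transport_create_queue() plus buffer setup
 *		return 0;
 *	}
 *
 *	static void example_remove(struct pci_dev *pdev)
 *	{
 *		// tear down whatever probe created
 *	}
 *
 *	static struct ntb_client example_client = {
 *		.driver = {
 *			.name = "ntb_example",
 *			.owner = THIS_MODULE,
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	// in module init:
 *	//	ntb_register_client_dev("ntb_example");
 *	//	ntb_register_client(&example_client);
 */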

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 600;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num = QP_TO_MW(qp_num);
	unsigned int i;

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->remote_rx_info = nt->mw[mw_num].virt_addr +
			     (qp_num / NTB_NUM_MW * rx_size);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->rx_buff = qp->remote_rx_info + 1;
	qp->rx_max_frame = min(transport_mtu, rx_size);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
}
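
/*
 * Layout of one QP's slice of a memory window, as carved up above
 * (illustrative sketch):
 *
 *	+--------------------+  <- remote_rx_info
 *	| struct ntb_rx_info |
 *	+--------------------+  <- rx_buff
 *	| frame 0 payload    |
 *	| ...                |
 *	| payload header     |  <- rx_buff + rx_max_frame - sizeof(hdr)
 *	+--------------------+
 *	| frame 1 payload    |
 *	| ...                |
 *
 * Each of the rx_max_entry frames is rx_max_frame bytes long and carries
 * its struct ntb_payload_header in its *last* bytes, which is why the
 * loop above zeroes the header at rx_max_frame * (i + 1) - sizeof(hdr).
 */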

static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < NTB_NUM_MW; i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_NUM_MW, NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != NTB_NUM_MW)
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < NTB_NUM_MW; i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;
		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < NTB_NUM_MW; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
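
/*
 * Handshake sketch (an informal summary of the link work above): each
 * side publishes its parameters to the peer's scratchpads and then reads
 * back what the peer published, retrying every NTB_LINK_DOWN_TIMEOUT ms
 * while the hardware link stays up:
 *
 *	write remote: MW0_SZ_HIGH/LOW .. MWn_SZ_HIGH/LOW, NUM_MWS,
 *	              NUM_QPS, VERSION
 *	read  remote: VERSION == NTB_TRANSPORT_VERSION ?
 *	read  remote: NUM_QPS == nt->max_qps ?
 *	read  remote: NUM_MWS == NTB_NUM_MW ?
 *	read  remote: MW sizes -> ntb_set_mw() allocates local rx buffers
 *
 * Only once every check passes is transport_link marked up and the
 * per-QP link work kicked off.
 */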

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_transport_init_queue(struct ntb_transport *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num = QP_TO_MW(qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
		      (qp_num / NTB_NUM_MW * tx_size);
	tx_size -= sizeof(struct ntb_rx_info);

	qp->tx_mw = qp->rx_info + 1;
	qp->tx_max_frame = min(transport_mtu, tx_size);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;
	qp->tx_index = 0;

	if (nt->debugfs_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	if (debugfs_initialized())
		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	else
		nt->debugfs_dir = NULL;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err1;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++)
		ntb_transport_init_queue(nt, i);

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err3:
	ntb_unregister_event_callback(nt->ndev);
err2:
	kfree(nt->qps);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	debugfs_remove_recursive(nt->debugfs_dir);
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct pci_dev *pdev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	debugfs_remove_recursive(nt->debugfs_dir);

	ntb_unregister_event_callback(nt->ndev);

	pdev = ntb_query_pdev(nt->ndev);

	for (i = 0; i < NTB_NUM_MW; i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	ntb_unregister_transport(nt->ndev);
	kfree(nt);
}

static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry, void *offset)
{
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;

	memcpy(entry->buf, offset, entry->len);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		goto out;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	if (hdr->len <= entry->len) {
		entry->len = hdr->len;
		ntb_rx_copy_task(qp, entry, offset);
	} else {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);

		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
	}

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

out:
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc;

	do {
		rc = ntb_process_rxc(qp);
	} while (!rc);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry,
			     void __iomem *offset)
{
	struct ntb_payload_header __iomem *hdr;

	memcpy_toio(offset, entry->buf, entry->len);

	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	/* Ensure that the data is fully copied out before setting the flag */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_sdb(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	void __iomem *offset;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
		entry->buf);

	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		/* note: handler arguments were previously swapped here; the
		 * qp pointer comes first, per the tx_handler prototype
		 */
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_tx_copy_task(qp, entry, offset);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device on which to create the queue
 * @handlers: transmit, receive, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err3;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err3:
	tasklet_disable(&qp->rx_work);
err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
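
/*
 * A minimal queue-creation sketch (illustrative only; the handler names
 * are hypothetical).  The handler signatures follow the function-pointer
 * types in struct ntb_transport_qp above:
 *
 *	static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			       void *data, int len)
 *	{
 *		// 'data' is the @cb pointer given to
 *		// ntb_transport_rx_enqueue(); consume the payload, then
 *		// re-post the buffer.
 *	}
 *
 *	static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
 *			       void *data, int len)
 *	{
 *		// transmission complete; the buffer may be freed or reused
 *	}
 *
 *	static void example_event(void *data, int status)
 *	{
 *		// status is NTB_LINK_UP or NTB_LINK_DOWN
 *	}
 *
 *	static const struct ntb_queue_handlers example_handlers = {
 *		.rx_handler = example_rx,
 *		.tx_handler = example_tx,
 *		.event_handler = example_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &example_handlers);
 */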

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	cancel_delayed_work_sync(&qp->link_work);

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
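
/*
 * Typical data-path usage sketch (illustrative; buffer management is the
 * client's responsibility and the variable names are hypothetical):
 *
 *	// post receive buffers while the link is coming up:
 *	rc = ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 *
 *	// transmit, bounded by the largest payload a frame can carry:
 *	if (len <= ntb_transport_max_size(qp))
 *		rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	// -EAGAIN from a full ring is propagated back to the caller;
 *	// retry later, e.g. from the tx_handler completion.
 *
 * Here the same pointer serves as both @cb and @data; a real client may
 * instead pass a containing structure (e.g. an sk_buff) as @cb.
 */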

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
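
/*
 * Orderly client shutdown sketch (illustrative only), mirroring the
 * bring-up path: stop the link, drain any still-pending receive buffers,
 * then release the queue:
 *
 *	ntb_transport_link_down(qp);
 *
 *	while ((buf = ntb_transport_rx_remove(qp, &len)) != NULL)
 *		; // return 'buf' to the client's buffer pool
 *
 *	ntb_transport_free_queue(qp);
 *
 * Note that ntb_transport_rx_remove() hands back the @cb pointer passed
 * to ntb_transport_rx_enqueue(), and only operates once client_ready has
 * been marked down.
 */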