ntb_transport.c

  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2012 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * BSD LICENSE
  14. *
  15. * Copyright(c) 2012 Intel Corporation. All rights reserved.
  16. *
  17. * Redistribution and use in source and binary forms, with or without
  18. * modification, are permitted provided that the following conditions
  19. * are met:
  20. *
  21. * * Redistributions of source code must retain the above copyright
  22. * notice, this list of conditions and the following disclaimer.
  23. * * Redistributions in binary form must reproduce the above copyright
  24. * notice, this list of conditions and the following disclaimer in
  25. * the documentation and/or other materials provided with the
  26. * distribution.
  27. * * Neither the name of Intel Corporation nor the names of its
  28. * contributors may be used to endorse or promote products derived
  29. * from this software without specific prior written permission.
  30. *
  31. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  32. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  33. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  34. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  35. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  36. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  37. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  38. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  39. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  40. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  41. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  42. *
  43. * Intel PCIe NTB Linux driver
  44. *
  45. * Contact Information:
  46. * Jon Mason <jon.mason@intel.com>
  47. */
  48. #include <linux/debugfs.h>
  49. #include <linux/delay.h>
  50. #include <linux/dma-mapping.h>
  51. #include <linux/errno.h>
  52. #include <linux/export.h>
  53. #include <linux/interrupt.h>
  54. #include <linux/module.h>
  55. #include <linux/pci.h>
  56. #include <linux/slab.h>
  57. #include <linux/types.h>
  58. #include <linux/ntb.h>
  59. #include "ntb_hw.h"
  60. #define NTB_TRANSPORT_VERSION 1
  61. static unsigned int transport_mtu = 0x401E;
  62. module_param(transport_mtu, uint, 0644);
  63. MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
  64. static unsigned char max_num_clients = 2;
  65. module_param(max_num_clients, byte, 0644);
  66. MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
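/*
 * Usage note (illustrative, not part of the original source): both knobs are
 * module parameters, so a hypothetical load line such as
 *	modprobe ntb_transport transport_mtu=0x8000 max_num_clients=4
 * caps the per-frame payload and the number of queue pairs carved out of the
 * memory windows; the values shown here are examples only.
 */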
  67. struct ntb_queue_entry {
  68. /* ntb_queue list reference */
  69. struct list_head entry;
  70. /* pointers to data to be transferred */
  71. void *cb_data;
  72. void *buf;
  73. unsigned int len;
  74. unsigned int flags;
  75. };
  76. struct ntb_transport_qp {
  77. struct ntb_transport *transport;
  78. struct ntb_device *ndev;
  79. void *cb_data;
  80. bool client_ready;
  81. bool qp_link;
  82. u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
  83. void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
  84. void *data, int len);
  85. struct list_head tx_free_q;
  86. spinlock_t ntb_tx_free_q_lock;
  87. void *tx_mw_begin;
  88. void *tx_mw_end;
  89. void *tx_offset;
  90. unsigned int tx_max_frame;
  91. void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
  92. void *data, int len);
  93. struct tasklet_struct rx_work;
  94. struct list_head rx_pend_q;
  95. struct list_head rx_free_q;
  96. spinlock_t ntb_rx_pend_q_lock;
  97. spinlock_t ntb_rx_free_q_lock;
  98. void *rx_buff_begin;
  99. void *rx_buff_end;
  100. void *rx_offset;
  101. unsigned int rx_max_frame;
  102. void (*event_handler) (void *data, int status);
  103. struct delayed_work link_work;
  104. struct work_struct link_cleanup;
  105. struct dentry *debugfs_dir;
  106. struct dentry *debugfs_stats;
  107. /* Stats */
  108. u64 rx_bytes;
  109. u64 rx_pkts;
  110. u64 rx_ring_empty;
  111. u64 rx_err_no_buf;
  112. u64 rx_err_oflow;
  113. u64 rx_err_ver;
  114. u64 tx_bytes;
  115. u64 tx_pkts;
  116. u64 tx_ring_full;
  117. };
  118. struct ntb_transport_mw {
  119. size_t size;
  120. void *virt_addr;
  121. dma_addr_t dma_addr;
  122. };
  123. struct ntb_transport_client_dev {
  124. struct list_head entry;
  125. struct device dev;
  126. };
  127. struct ntb_transport {
  128. struct list_head entry;
  129. struct list_head client_devs;
  130. struct ntb_device *ndev;
  131. struct ntb_transport_mw mw[NTB_NUM_MW];
  132. struct ntb_transport_qp *qps;
  133. unsigned int max_qps;
  134. unsigned long qp_bitmap;
  135. bool transport_link;
  136. struct delayed_work link_work;
  137. struct work_struct link_cleanup;
  138. struct dentry *debugfs_dir;
  139. };
  140. enum {
  141. DESC_DONE_FLAG = 1 << 0,
  142. LINK_DOWN_FLAG = 1 << 1,
  143. };
  144. struct ntb_payload_header {
  145. u64 ver;
  146. unsigned int len;
  147. unsigned int flags;
  148. };
  149. enum {
  150. VERSION = 0,
  151. MW0_SZ,
  152. MW1_SZ,
  153. NUM_QPS,
  154. QP_LINKS,
  155. MAX_SPAD,
  156. };
  157. #define QP_TO_MW(qp) ((qp) % NTB_NUM_MW)
  158. #define NTB_QP_DEF_NUM_ENTRIES 100
  159. #define NTB_LINK_DOWN_TIMEOUT 10
  160. static int ntb_match_bus(struct device *dev, struct device_driver *drv)
  161. {
  162. return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
  163. }
  164. static int ntb_client_probe(struct device *dev)
  165. {
  166. const struct ntb_client *drv = container_of(dev->driver,
  167. struct ntb_client, driver);
  168. struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
  169. int rc = -EINVAL;
  170. get_device(dev);
  171. if (drv && drv->probe)
  172. rc = drv->probe(pdev);
  173. if (rc)
  174. put_device(dev);
  175. return rc;
  176. }
  177. static int ntb_client_remove(struct device *dev)
  178. {
  179. const struct ntb_client *drv = container_of(dev->driver,
  180. struct ntb_client, driver);
  181. struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
  182. if (drv && drv->remove)
  183. drv->remove(pdev);
  184. put_device(dev);
  185. return 0;
  186. }
  187. static struct bus_type ntb_bus_type = {
  188. .name = "ntb_bus",
  189. .match = ntb_match_bus,
  190. .probe = ntb_client_probe,
  191. .remove = ntb_client_remove,
  192. };
  193. static LIST_HEAD(ntb_transport_list);
  194. static int ntb_bus_init(struct ntb_transport *nt)
  195. {
  196. if (list_empty(&ntb_transport_list)) {
  197. int rc = bus_register(&ntb_bus_type);
  198. if (rc)
  199. return rc;
  200. }
  201. list_add(&nt->entry, &ntb_transport_list);
  202. return 0;
  203. }
  204. static void ntb_bus_remove(struct ntb_transport *nt)
  205. {
  206. struct ntb_transport_client_dev *client_dev, *cd;
  207. list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
  208. dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
  209. dev_name(&client_dev->dev));
  210. list_del(&client_dev->entry);
  211. device_unregister(&client_dev->dev);
  212. }
  213. list_del(&nt->entry);
  214. if (list_empty(&ntb_transport_list))
  215. bus_unregister(&ntb_bus_type);
  216. }
  217. static void ntb_client_release(struct device *dev)
  218. {
  219. struct ntb_transport_client_dev *client_dev;
  220. client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
  221. kfree(client_dev);
  222. }
  223. /**
  224. * ntb_unregister_client_dev - Unregister NTB client device
  225. * @device_name: Name of NTB client device
  226. *
  227. * Unregister an NTB client device with the NTB transport layer
  228. */
  229. void ntb_unregister_client_dev(char *device_name)
  230. {
  231. struct ntb_transport_client_dev *client, *cd;
  232. struct ntb_transport *nt;
  233. list_for_each_entry(nt, &ntb_transport_list, entry)
  234. list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
  235. if (!strncmp(dev_name(&client->dev), device_name,
  236. strlen(device_name))) {
  237. list_del(&client->entry);
  238. device_unregister(&client->dev);
  239. }
  240. }
  241. EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
  242. /**
  243. * ntb_register_client_dev - Register NTB client device
  244. * @device_name: Name of NTB client device
  245. *
  246. * Register an NTB client device with the NTB transport layer
  247. */
  248. int ntb_register_client_dev(char *device_name)
  249. {
  250. struct ntb_transport_client_dev *client_dev;
  251. struct ntb_transport *nt;
  252. int rc;
  253. if (list_empty(&ntb_transport_list))
  254. return -ENODEV;
  255. list_for_each_entry(nt, &ntb_transport_list, entry) {
  256. struct device *dev;
  257. client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
  258. GFP_KERNEL);
  259. if (!client_dev) {
  260. rc = -ENOMEM;
  261. goto err;
  262. }
  263. dev = &client_dev->dev;
  264. /* setup and register client devices */
  265. dev_set_name(dev, "%s", device_name);
  266. dev->bus = &ntb_bus_type;
  267. dev->release = ntb_client_release;
  268. dev->parent = &ntb_query_pdev(nt->ndev)->dev;
  269. rc = device_register(dev);
  270. if (rc) {
  271. kfree(client_dev);
  272. goto err;
  273. }
  274. list_add_tail(&client_dev->entry, &nt->client_devs);
  275. }
  276. return 0;
  277. err:
  278. ntb_unregister_client_dev(device_name);
  279. return rc;
  280. }
  281. EXPORT_SYMBOL_GPL(ntb_register_client_dev);
  282. /**
  283. * ntb_register_client - Register NTB client driver
  284. * @drv: NTB client driver to be registered
  285. *
  286. * Register an NTB client driver with the NTB transport layer
  287. *
  288. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  289. */
  290. int ntb_register_client(struct ntb_client *drv)
  291. {
  292. drv->driver.bus = &ntb_bus_type;
  293. if (list_empty(&ntb_transport_list))
  294. return -ENODEV;
  295. return driver_register(&drv->driver);
  296. }
  297. EXPORT_SYMBOL_GPL(ntb_register_client);
  298. /**
  299. * ntb_unregister_client - Unregister NTB client driver
  300. * @drv: NTB client driver to be unregistered
  301. *
  302. * Unregister an NTB client driver with the NTB transport layer
  305. */
  306. void ntb_unregister_client(struct ntb_client *drv)
  307. {
  308. driver_unregister(&drv->driver);
  309. }
  310. EXPORT_SYMBOL_GPL(ntb_unregister_client);
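/*
 * Illustrative sketch (not part of the original file): a hypothetical client
 * module would typically create its named device on ntb_bus and then register
 * its driver so ntb_match_bus() can bind the two by name.  "ntb_foo",
 * foo_probe() and foo_remove() are made-up names; struct ntb_client is the
 * structure used by ntb_client_probe()/ntb_client_remove() above.
 */
#if 0
static struct ntb_client foo_client = {
	.driver = {
		.name  = "ntb_foo",
		.owner = THIS_MODULE,
	},
	.probe  = foo_probe,	/* int foo_probe(struct pci_dev *pdev); */
	.remove = foo_remove,	/* void foo_remove(struct pci_dev *pdev); */
};

static int __init foo_init(void)
{
	int rc;

	rc = ntb_register_client_dev("ntb_foo");
	if (rc)
		return rc;

	rc = ntb_register_client(&foo_client);
	if (rc)
		ntb_unregister_client_dev("ntb_foo");

	return rc;
}
#endif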
  311. static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
  312. loff_t *offp)
  313. {
  314. struct ntb_transport_qp *qp;
  315. char buf[1024];
  316. ssize_t ret, out_offset, out_count;
  317. out_count = 1024;
  318. qp = filp->private_data;
  319. out_offset = 0;
  320. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  321. "NTB QP stats\n");
  322. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  323. "rx_bytes - \t%llu\n", qp->rx_bytes);
  324. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  325. "rx_pkts - \t%llu\n", qp->rx_pkts);
  326. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  327. "rx_ring_empty - %llu\n", qp->rx_ring_empty);
  328. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  329. "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
  330. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  331. "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
  332. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  333. "rx_err_ver - \t%llu\n", qp->rx_err_ver);
  334. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  335. "rx_buff_begin - %p\n", qp->rx_buff_begin);
  336. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  337. "rx_offset - \t%p\n", qp->rx_offset);
  338. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  339. "rx_buff_end - \t%p\n", qp->rx_buff_end);
  340. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  341. "tx_bytes - \t%llu\n", qp->tx_bytes);
  342. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  343. "tx_pkts - \t%llu\n", qp->tx_pkts);
  344. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  345. "tx_ring_full - \t%llu\n", qp->tx_ring_full);
  346. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  347. "tx_mw_begin - \t%p\n", qp->tx_mw_begin);
  348. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  349. "tx_offset - \t%p\n", qp->tx_offset);
  350. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  351. "tx_mw_end - \t%p\n", qp->tx_mw_end);
  352. out_offset += snprintf(buf + out_offset, out_count - out_offset,
  353. "QP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
  354. "Up" : "Down");
  355. ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
  356. return ret;
  357. }
  358. static const struct file_operations ntb_qp_debugfs_stats = {
  359. .owner = THIS_MODULE,
  360. .open = simple_open,
  361. .read = debugfs_read,
  362. };
  363. static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
  364. struct list_head *list)
  365. {
  366. unsigned long flags;
  367. spin_lock_irqsave(lock, flags);
  368. list_add_tail(entry, list);
  369. spin_unlock_irqrestore(lock, flags);
  370. }
  371. static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
  372. struct list_head *list)
  373. {
  374. struct ntb_queue_entry *entry;
  375. unsigned long flags;
  376. spin_lock_irqsave(lock, flags);
  377. if (list_empty(list)) {
  378. entry = NULL;
  379. goto out;
  380. }
  381. entry = list_first_entry(list, struct ntb_queue_entry, entry);
  382. list_del(&entry->entry);
  383. out:
  384. spin_unlock_irqrestore(lock, flags);
  385. return entry;
  386. }
  387. static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
  388. unsigned int qp_num)
  389. {
  390. struct ntb_transport_qp *qp = &nt->qps[qp_num];
  391. unsigned int rx_size, num_qps_mw;
  392. u8 mw_num = QP_TO_MW(qp_num);
  393. void *offset;
  394. WARN_ON(nt->mw[mw_num].virt_addr == 0);
  395. if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
  396. num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
  397. else
  398. num_qps_mw = nt->max_qps / NTB_NUM_MW;
  399. rx_size = nt->mw[mw_num].size / num_qps_mw;
  400. qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
  401. (qp_num / NTB_NUM_MW * rx_size);
  402. qp->rx_buff_end = qp->rx_buff_begin + rx_size;
  403. qp->rx_offset = qp->rx_buff_begin;
  404. qp->rx_max_frame = min(transport_mtu, rx_size);
  405. /* setup the hdr offsets with 0's */
  406. for (offset = qp->rx_buff_begin + qp->rx_max_frame -
  407. sizeof(struct ntb_payload_header);
  408. offset < qp->rx_buff_end; offset += qp->rx_max_frame)
  409. memset(offset, 0, sizeof(struct ntb_payload_header));
  410. qp->rx_pkts = 0;
  411. qp->tx_pkts = 0;
  412. }
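/*
 * Worked example of the split above (assuming NTB_NUM_MW == 2): with
 * max_qps == 5, QP_TO_MW() places QPs 0/2/4 in MW0 and QPs 1/3 in MW1.
 * MW0 therefore holds 3 QPs and MW1 holds 2, so rx_size for MW0 is
 * mw[0].size / 3 and each QP's buffer starts at
 * virt_addr + (qp_num / NTB_NUM_MW) * rx_size, i.e. slots 0, 1 and 2.
 */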
  413. static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
  414. {
  415. struct ntb_transport_mw *mw = &nt->mw[num_mw];
  416. struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
  417. /* Alloc memory for receiving data. Must be 4k aligned */
  418. mw->size = ALIGN(size, 4096);
  419. mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
  420. GFP_KERNEL);
  421. if (!mw->virt_addr) {
  422. dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
  423. (int) mw->size);
  424. return -ENOMEM;
  425. }
  426. /* Notify HW the memory location of the receive buffer */
  427. ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
  428. return 0;
  429. }
  430. static void ntb_qp_link_cleanup(struct work_struct *work)
  431. {
  432. struct ntb_transport_qp *qp = container_of(work,
  433. struct ntb_transport_qp,
  434. link_cleanup);
  435. struct ntb_transport *nt = qp->transport;
  436. struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
  437. if (qp->qp_link == NTB_LINK_DOWN) {
  438. cancel_delayed_work_sync(&qp->link_work);
  439. return;
  440. }
  441. if (qp->event_handler)
  442. qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
  443. dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
  444. qp->qp_link = NTB_LINK_DOWN;
  445. if (nt->transport_link == NTB_LINK_UP)
  446. schedule_delayed_work(&qp->link_work,
  447. msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
  448. }
  449. static void ntb_qp_link_down(struct ntb_transport_qp *qp)
  450. {
  451. schedule_work(&qp->link_cleanup);
  452. }
  453. static void ntb_transport_link_cleanup(struct work_struct *work)
  454. {
  455. struct ntb_transport *nt = container_of(work, struct ntb_transport,
  456. link_cleanup);
  457. int i;
  458. if (nt->transport_link == NTB_LINK_DOWN)
  459. cancel_delayed_work_sync(&nt->link_work);
  460. else
  461. nt->transport_link = NTB_LINK_DOWN;
  462. /* Pass along the info to any clients */
  463. for (i = 0; i < nt->max_qps; i++)
  464. if (!test_bit(i, &nt->qp_bitmap))
  465. ntb_qp_link_down(&nt->qps[i]);
  466. /* The scratchpad registers keep the values if the remote side
  467. * goes down, blast them now to give them a sane value the next
  468. * time they are accessed
  469. */
  470. for (i = 0; i < MAX_SPAD; i++)
  471. ntb_write_local_spad(nt->ndev, i, 0);
  472. }
  473. static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
  474. {
  475. struct ntb_transport *nt = data;
  476. switch (event) {
  477. case NTB_EVENT_HW_LINK_UP:
  478. schedule_delayed_work(&nt->link_work, 0);
  479. break;
  480. case NTB_EVENT_HW_LINK_DOWN:
  481. schedule_work(&nt->link_cleanup);
  482. break;
  483. default:
  484. BUG();
  485. }
  486. }
  487. static void ntb_transport_link_work(struct work_struct *work)
  488. {
  489. struct ntb_transport *nt = container_of(work, struct ntb_transport,
  490. link_work.work);
  491. struct ntb_device *ndev = nt->ndev;
  492. struct pci_dev *pdev = ntb_query_pdev(ndev);
  493. u32 val;
  494. int rc, i;
  495. /* send the local info */
  496. rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
  497. if (rc) {
  498. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  499. NTB_TRANSPORT_VERSION, VERSION);
  500. goto out;
  501. }
  502. rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
  503. if (rc) {
  504. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  505. (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
  506. goto out;
  507. }
  508. rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
  509. if (rc) {
  510. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  511. (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
  512. goto out;
  513. }
  514. rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
  515. if (rc) {
  516. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  517. nt->max_qps, NUM_QPS);
  518. goto out;
  519. }
  520. rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
  521. if (rc) {
  522. dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
  523. goto out;
  524. }
  525. rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
  526. if (rc) {
  527. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  528. val, QP_LINKS);
  529. goto out;
  530. }
  531. /* Query the remote side for its info */
  532. rc = ntb_read_remote_spad(ndev, VERSION, &val);
  533. if (rc) {
  534. dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
  535. goto out;
  536. }
  537. if (val != NTB_TRANSPORT_VERSION)
  538. goto out;
  539. dev_dbg(&pdev->dev, "Remote version = %d\n", val);
  540. rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
  541. if (rc) {
  542. dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
  543. goto out;
  544. }
  545. if (val != nt->max_qps)
  546. goto out;
  547. dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
  548. rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
  549. if (rc) {
  550. dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
  551. goto out;
  552. }
  553. if (!val)
  554. goto out;
  555. dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
  556. rc = ntb_set_mw(nt, 0, val);
  557. if (rc)
  558. goto out;
  559. rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
  560. if (rc) {
  561. dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
  562. goto out;
  563. }
  564. if (!val)
  565. goto out;
  566. dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
  567. rc = ntb_set_mw(nt, 1, val);
  568. if (rc)
  569. goto out;
  570. nt->transport_link = NTB_LINK_UP;
  571. for (i = 0; i < nt->max_qps; i++) {
  572. struct ntb_transport_qp *qp = &nt->qps[i];
  573. ntb_transport_setup_qp_mw(nt, i);
  574. if (qp->client_ready == NTB_LINK_UP)
  575. schedule_delayed_work(&qp->link_work, 0);
  576. }
  577. return;
  578. out:
  579. if (ntb_hw_link_status(ndev))
  580. schedule_delayed_work(&nt->link_work,
  581. msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
  582. }
  583. static void ntb_qp_link_work(struct work_struct *work)
  584. {
  585. struct ntb_transport_qp *qp = container_of(work,
  586. struct ntb_transport_qp,
  587. link_work.work);
  588. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  589. struct ntb_transport *nt = qp->transport;
  590. int rc, val;
  591. WARN_ON(nt->transport_link != NTB_LINK_UP);
  592. rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
  593. if (rc) {
  594. dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
  595. return;
  596. }
  597. rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
  598. if (rc)
  599. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  600. val | 1 << qp->qp_num, QP_LINKS);
  601. /* query remote spad for qp ready bits */
  602. rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
  603. if (rc)
  604. dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
  605. dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
  606. /* See if the remote side is up */
  607. if (1 << qp->qp_num & val) {
  608. qp->qp_link = NTB_LINK_UP;
  609. dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
  610. if (qp->event_handler)
  611. qp->event_handler(qp->cb_data, NTB_LINK_UP);
  612. } else if (nt->transport_link == NTB_LINK_UP)
  613. schedule_delayed_work(&qp->link_work,
  614. msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
  615. }
  616. static void ntb_transport_init_queue(struct ntb_transport *nt,
  617. unsigned int qp_num)
  618. {
  619. struct ntb_transport_qp *qp;
  620. unsigned int num_qps_mw, tx_size;
  621. u8 mw_num = QP_TO_MW(qp_num);
  622. qp = &nt->qps[qp_num];
  623. qp->qp_num = qp_num;
  624. qp->transport = nt;
  625. qp->ndev = nt->ndev;
  626. qp->qp_link = NTB_LINK_DOWN;
  627. qp->client_ready = NTB_LINK_DOWN;
  628. qp->event_handler = NULL;
  629. if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
  630. num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
  631. else
  632. num_qps_mw = nt->max_qps / NTB_NUM_MW;
  633. tx_size = ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
  634. qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
  635. (qp_num / NTB_NUM_MW * tx_size);
  636. qp->tx_mw_end = qp->tx_mw_begin + tx_size;
  637. qp->tx_offset = qp->tx_mw_begin;
  638. qp->tx_max_frame = min(transport_mtu, tx_size);
  639. if (nt->debugfs_dir) {
  640. char debugfs_name[8];
  641. snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
  642. qp->debugfs_dir = debugfs_create_dir(debugfs_name,
  643. nt->debugfs_dir);
  644. qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
  645. qp->debugfs_dir, qp,
  646. &ntb_qp_debugfs_stats);
  647. }
  648. INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
  649. INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
  650. spin_lock_init(&qp->ntb_rx_pend_q_lock);
  651. spin_lock_init(&qp->ntb_rx_free_q_lock);
  652. spin_lock_init(&qp->ntb_tx_free_q_lock);
  653. INIT_LIST_HEAD(&qp->rx_pend_q);
  654. INIT_LIST_HEAD(&qp->rx_free_q);
  655. INIT_LIST_HEAD(&qp->tx_free_q);
  656. }
  657. int ntb_transport_init(struct pci_dev *pdev)
  658. {
  659. struct ntb_transport *nt;
  660. int rc, i;
  661. nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
  662. if (!nt)
  663. return -ENOMEM;
  664. if (debugfs_initialized())
  665. nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
  666. else
  667. nt->debugfs_dir = NULL;
  668. nt->ndev = ntb_register_transport(pdev, nt);
  669. if (!nt->ndev) {
  670. rc = -EIO;
  671. goto err;
  672. }
  673. nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
  674. nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
  675. GFP_KERNEL);
  676. if (!nt->qps) {
  677. rc = -ENOMEM;
  678. goto err1;
  679. }
  680. nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
  681. for (i = 0; i < nt->max_qps; i++)
  682. ntb_transport_init_queue(nt, i);
  683. INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
  684. INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
  685. rc = ntb_register_event_callback(nt->ndev,
  686. ntb_transport_event_callback);
  687. if (rc)
  688. goto err2;
  689. INIT_LIST_HEAD(&nt->client_devs);
  690. rc = ntb_bus_init(nt);
  691. if (rc)
  692. goto err3;
  693. if (ntb_hw_link_status(nt->ndev))
  694. schedule_delayed_work(&nt->link_work, 0);
  695. return 0;
  696. err3:
  697. ntb_unregister_event_callback(nt->ndev);
  698. err2:
  699. kfree(nt->qps);
  700. err1:
  701. ntb_unregister_transport(nt->ndev);
  702. err:
  703. debugfs_remove_recursive(nt->debugfs_dir);
  704. kfree(nt);
  705. return rc;
  706. }
  707. void ntb_transport_free(void *transport)
  708. {
  709. struct ntb_transport *nt = transport;
  710. struct pci_dev *pdev;
  711. int i;
  712. nt->transport_link = NTB_LINK_DOWN;
  713. /* verify that all the qp's are freed */
  714. for (i = 0; i < nt->max_qps; i++)
  715. if (!test_bit(i, &nt->qp_bitmap))
  716. ntb_transport_free_queue(&nt->qps[i]);
  717. ntb_bus_remove(nt);
  718. cancel_delayed_work_sync(&nt->link_work);
  719. debugfs_remove_recursive(nt->debugfs_dir);
  720. ntb_unregister_event_callback(nt->ndev);
  721. pdev = ntb_query_pdev(nt->ndev);
  722. for (i = 0; i < NTB_NUM_MW; i++)
  723. if (nt->mw[i].virt_addr)
  724. dma_free_coherent(&pdev->dev, nt->mw[i].size,
  725. nt->mw[i].virt_addr,
  726. nt->mw[i].dma_addr);
  727. kfree(nt->qps);
  728. ntb_unregister_transport(nt->ndev);
  729. kfree(nt);
  730. }
  731. static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
  732. struct ntb_queue_entry *entry, void *offset)
  733. {
  734. struct ntb_payload_header *hdr;
  735. BUG_ON(offset < qp->rx_buff_begin ||
  736. offset + qp->rx_max_frame >= qp->rx_buff_end);
  737. hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
  738. entry->len = hdr->len;
  739. memcpy(entry->buf, offset, entry->len);
  740. /* Ensure that the data is fully copied out before clearing the flag */
  741. wmb();
  742. hdr->flags = 0;
  743. if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
  744. qp->rx_handler(qp, qp->cb_data, entry->cb_data, entry->len);
  745. ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
  746. }
  747. static int ntb_process_rxc(struct ntb_transport_qp *qp)
  748. {
  749. struct ntb_payload_header *hdr;
  750. struct ntb_queue_entry *entry;
  751. void *offset;
  752. /* Locate the header at the end of the current receive slot up front,
  753. * so the "no buffer" debug print below reads valid memory */
  754. offset = qp->rx_offset;
  755. hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
  756. entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
  757. if (!entry) {
  758. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  759. "no buffer - HDR ver %llu, len %d, flags %x\n",
  760. hdr->ver, hdr->len, hdr->flags);
  761. qp->rx_err_no_buf++;
  762. return -ENOMEM;
  763. }
  764. if (!(hdr->flags & DESC_DONE_FLAG)) {
  765. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  766. &qp->rx_pend_q);
  767. qp->rx_ring_empty++;
  768. return -EAGAIN;
  769. }
  770. if (hdr->ver != qp->rx_pkts) {
  771. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  772. "qp %d: version mismatch, expected %llu - got %llu\n",
  773. qp->qp_num, qp->rx_pkts, hdr->ver);
  774. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  775. &qp->rx_pend_q);
  776. qp->rx_err_ver++;
  777. return -EIO;
  778. }
  779. if (hdr->flags & LINK_DOWN_FLAG) {
  780. ntb_qp_link_down(qp);
  781. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  782. &qp->rx_pend_q);
  783. /* Ensure that the data is fully copied out before clearing the
  784. * done flag
  785. */
  786. wmb();
  787. hdr->flags = 0;
  788. goto out;
  789. }
  790. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  791. "rx offset %p, ver %llu - %d payload received, buf size %d\n",
  792. qp->rx_offset, hdr->ver, hdr->len, entry->len);
  793. if (hdr->len <= entry->len)
  794. ntb_rx_copy_task(qp, entry, offset);
  795. else {
  796. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
  797. &qp->rx_pend_q);
  798. /* Ensure that the data is fully copied out before clearing the
  799. * done flag
  800. */
  801. wmb();
  802. hdr->flags = 0;
  803. qp->rx_err_oflow++;
  804. dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
  805. "RX overflow! Wanted %d got %d\n",
  806. hdr->len, entry->len);
  807. }
  808. qp->rx_bytes += hdr->len;
  809. qp->rx_pkts++;
  810. out:
  811. qp->rx_offset += qp->rx_max_frame;
  812. if (qp->rx_offset + qp->rx_max_frame >= qp->rx_buff_end)
  813. qp->rx_offset = qp->rx_buff_begin;
  814. return 0;
  815. }
  816. static void ntb_transport_rx(unsigned long data)
  817. {
  818. struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
  819. int rc;
  820. do {
  821. rc = ntb_process_rxc(qp);
  822. } while (!rc);
  823. }
  824. static void ntb_transport_rxc_db(void *data, int db_num)
  825. {
  826. struct ntb_transport_qp *qp = data;
  827. dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
  828. __func__, db_num);
  829. tasklet_schedule(&qp->rx_work);
  830. }
  831. static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
  832. struct ntb_queue_entry *entry,
  833. void *offset)
  834. {
  835. struct ntb_payload_header *hdr;
  836. BUG_ON(offset < qp->tx_mw_begin ||
  837. offset + qp->tx_max_frame >= qp->tx_mw_end);
  838. memcpy_toio(offset, entry->buf, entry->len);
  839. hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
  840. hdr->len = entry->len;
  841. hdr->ver = qp->tx_pkts;
  842. /* Ensure that the data is fully copied out before setting the flag */
  843. wmb();
  844. hdr->flags = entry->flags | DESC_DONE_FLAG;
  845. ntb_ring_sdb(qp->ndev, qp->qp_num);
  846. /* The entry length can only be zero if the packet is intended to be a
  847. * "link down" or similar. Since no payload is being sent in these
  848. * cases, there is nothing to add to the completion queue.
  849. */
  850. if (entry->len > 0) {
  851. qp->tx_bytes += entry->len;
  852. if (qp->tx_handler)
  853. qp->tx_handler(qp, qp->cb_data, entry->cb_data,
  854. entry->len);
  855. }
  856. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
  857. }
  858. static int ntb_process_tx(struct ntb_transport_qp *qp,
  859. struct ntb_queue_entry *entry)
  860. {
  861. struct ntb_payload_header *hdr;
  862. void *offset;
  863. offset = qp->tx_offset;
  864. hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
  865. dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %p, entry len %d flags %x buff %p\n",
  866. qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
  867. entry->buf);
  868. if (hdr->flags) {
  869. qp->tx_ring_full++;
  870. return -EAGAIN;
  871. }
  872. if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
  873. if (qp->tx_handler)
  874. qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
  875. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
  876. &qp->tx_free_q);
  877. return 0;
  878. }
  879. ntb_tx_copy_task(qp, entry, offset);
  880. qp->tx_offset += qp->tx_max_frame;
  881. if (qp->tx_offset + qp->tx_max_frame >= qp->tx_mw_end)
  882. qp->tx_offset = qp->tx_mw_begin;
  883. qp->tx_pkts++;
  884. return 0;
  885. }
  886. static void ntb_send_link_down(struct ntb_transport_qp *qp)
  887. {
  888. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  889. struct ntb_queue_entry *entry;
  890. int i, rc;
  891. if (qp->qp_link == NTB_LINK_DOWN)
  892. return;
  893. qp->qp_link = NTB_LINK_DOWN;
  894. dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
  895. for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
  896. entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
  897. if (entry)
  898. break;
  899. msleep(100);
  900. }
  901. if (!entry)
  902. return;
  903. entry->cb_data = NULL;
  904. entry->buf = NULL;
  905. entry->len = 0;
  906. entry->flags = LINK_DOWN_FLAG;
  907. rc = ntb_process_tx(qp, entry);
  908. if (rc)
  909. dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
  910. qp->qp_num);
  911. }
  912. /**
  913. * ntb_transport_create_queue - Create a new NTB transport layer queue
  914. * @data: per queue pointer handed back in the queue's callbacks
  915. * @pdev: PCI device on which the NTB transport was registered
  916. * @handlers: receive, transmit and event callback functions for the queue
  917. *
  918. * Create a new NTB transport layer queue and provide the queue with a callback
  919. * routine for both transmit and receive. The receive callback routine will be
  920. * used to pass up data when the transport has received it on the queue. The
  921. * transmit callback routine will be called when the transport has completed the
  922. * transmission of the data on the queue and the data is ready to be freed.
  923. *
  924. * RETURNS: pointer to newly created ntb_queue, NULL on error.
  925. */
  926. struct ntb_transport_qp *
  927. ntb_transport_create_queue(void *data, struct pci_dev *pdev,
  928. const struct ntb_queue_handlers *handlers)
  929. {
  930. struct ntb_queue_entry *entry;
  931. struct ntb_transport_qp *qp;
  932. struct ntb_transport *nt;
  933. unsigned int free_queue;
  934. int rc, i;
  935. nt = ntb_find_transport(pdev);
  936. if (!nt)
  937. goto err;
  938. free_queue = ffs(nt->qp_bitmap);
  939. if (!free_queue)
  940. goto err;
  941. /* decrement free_queue to make it zero based */
  942. free_queue--;
  943. clear_bit(free_queue, &nt->qp_bitmap);
  944. qp = &nt->qps[free_queue];
  945. qp->cb_data = data;
  946. qp->rx_handler = handlers->rx_handler;
  947. qp->tx_handler = handlers->tx_handler;
  948. qp->event_handler = handlers->event_handler;
  949. for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
  950. entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
  951. if (!entry)
  952. goto err1;
  953. ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
  954. &qp->rx_free_q);
  955. }
  956. for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
  957. entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
  958. if (!entry)
  959. goto err2;
  960. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
  961. &qp->tx_free_q);
  962. }
  963. tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
  964. rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
  965. ntb_transport_rxc_db);
  966. if (rc)
  967. goto err3;
  968. dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
  969. return qp;
  970. err3:
  971. tasklet_disable(&qp->rx_work);
  972. err2:
  973. while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
  974. kfree(entry);
  975. err1:
  976. while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
  977. kfree(entry);
  978. set_bit(free_queue, &nt->qp_bitmap);
  979. err:
  980. return NULL;
  981. }
  982. EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
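/*
 * Illustrative sketch (not part of the original file): a hypothetical client
 * bringing up a queue pair.  The "foo" names and struct foo_dev are
 * assumptions; struct ntb_queue_handlers matches the handler signatures used
 * by this file.
 */
#if 0
static int foo_open_qp(struct foo_dev *fdev, struct pci_dev *pdev)
{
	static const struct ntb_queue_handlers foo_handlers = {
		.rx_handler	= foo_rx,
		.tx_handler	= foo_tx,
		.event_handler	= foo_event,
	};

	fdev->qp = ntb_transport_create_queue(fdev, pdev, &foo_handlers);
	if (!fdev->qp)
		return -EINVAL;

	/* Declare the client ready; the QP link only comes up once the peer
	 * has set its bit in the QP_LINKS scratchpad as well.
	 */
	ntb_transport_link_up(fdev->qp);
	return 0;
}
#endif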
  983. /**
  984. * ntb_transport_free_queue - Frees NTB transport queue
  985. * @qp: NTB queue to be freed
  986. *
  987. * Frees NTB transport queue
  988. */
  989. void ntb_transport_free_queue(struct ntb_transport_qp *qp)
  990. {
  991. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  992. struct ntb_queue_entry *entry;
  993. if (!qp)
  994. return;
  995. cancel_delayed_work_sync(&qp->link_work);
  996. ntb_unregister_db_callback(qp->ndev, qp->qp_num);
  997. tasklet_disable(&qp->rx_work);
  998. while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
  999. kfree(entry);
  1000. while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
  1001. dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
  1002. kfree(entry);
  1003. }
  1004. while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
  1005. kfree(entry);
  1006. set_bit(qp->qp_num, &qp->transport->qp_bitmap);
  1007. dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
  1008. }
  1009. EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
  1010. /**
  1011. * ntb_transport_rx_remove - Dequeues enqueued rx packet
  1012. * @qp: NTB queue from which the entry is removed
  1013. * @len: pointer through which the dequeued buffer's length is returned
  1014. *
  1015. * Dequeues unused buffers from the receive queue. Should only be used
  1016. * during shutdown of the qp.
  1017. *
  1018. * RETURNS: the dequeued buffer, or NULL on error or if none is pending.
  1019. */
  1020. void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
  1021. {
  1022. struct ntb_queue_entry *entry;
  1023. void *buf;
  1024. if (!qp || qp->client_ready == NTB_LINK_UP)
  1025. return NULL;
  1026. entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
  1027. if (!entry)
  1028. return NULL;
  1029. buf = entry->cb_data;
  1030. *len = entry->len;
  1031. ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
  1032. return buf;
  1033. }
  1034. EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
  1035. /**
  1036. * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
  1037. * @qp: NTB transport layer queue the entry is to be enqueued on
  1038. * @cb: per buffer pointer for callback function to use
  1039. * @data: pointer to data buffer that incoming packets will be copied into
  1040. * @len: length of the data buffer
  1041. *
  1042. * Enqueue a new receive buffer onto the transport queue into which an NTB
  1043. * payload can be received.
  1044. *
  1045. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  1046. */
  1047. int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
  1048. unsigned int len)
  1049. {
  1050. struct ntb_queue_entry *entry;
  1051. if (!qp)
  1052. return -EINVAL;
  1053. entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
  1054. if (!entry)
  1055. return -ENOMEM;
  1056. entry->cb_data = cb;
  1057. entry->buf = data;
  1058. entry->len = len;
  1059. ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
  1060. return 0;
  1061. }
  1062. EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
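/*
 * Illustrative sketch (not part of the original file): receive buffers must
 * be posted before the peer transmits, each large enough for the biggest
 * payload the peer will send (ntb_transport_max_size() is used here as a
 * reasonable upper bound).  The "foo" name is made up.
 */
#if 0
static int foo_post_rx_bufs(struct ntb_transport_qp *qp, int num)
{
	unsigned int size = ntb_transport_max_size(qp);

	while (num--) {
		void *buf = kmalloc(size, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* buf doubles as the per-buffer callback cookie here */
		if (ntb_transport_rx_enqueue(qp, buf, buf, size)) {
			kfree(buf);
			return -ENOMEM;
		}
	}
	return 0;
}
#endif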
  1063. /**
  1064. * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
  1065. * @qp: NTB transport layer queue the entry is to be enqueued on
  1066. * @cb: per buffer pointer for callback function to use
  1067. * @data: pointer to data buffer that will be sent
  1068. * @len: length of the data buffer
  1069. *
  1070. * Enqueue a new transmit buffer onto the transport queue from which an NTB
  1071. * payload will be transmitted. This assumes that a lock is being held to
  1072. * serialize access to the qp.
  1073. *
  1074. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  1075. */
  1076. int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
  1077. unsigned int len)
  1078. {
  1079. struct ntb_queue_entry *entry;
  1080. int rc;
  1081. if (!qp || qp->qp_link != NTB_LINK_UP || !len)
  1082. return -EINVAL;
  1083. entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
  1084. if (!entry)
  1085. return -ENOMEM;
  1086. entry->cb_data = cb;
  1087. entry->buf = data;
  1088. entry->len = len;
  1089. entry->flags = 0;
  1090. rc = ntb_process_tx(qp, entry);
  1091. if (rc)
  1092. ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
  1093. &qp->tx_free_q);
  1094. return rc;
  1095. }
  1096. EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
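/*
 * Illustrative sketch (not part of the original file): a minimal transmit
 * wrapper.  The caller keeps ownership of "buf" until its tx_handler fires;
 * the "foo" name is made up.
 */
#if 0
static int foo_send(struct ntb_transport_qp *qp, void *buf, unsigned int len)
{
	if (!ntb_transport_link_query(qp))
		return -ENOTCONN;

	if (len > ntb_transport_max_size(qp))
		return -EMSGSIZE;

	/* -EAGAIN from the transport means the next frame slot has not yet
	 * been consumed by the peer (tx ring full); callers usually retry.
	 */
	return ntb_transport_tx_enqueue(qp, buf, buf, len);
}
#endif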
  1097. /**
  1098. * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
  1099. * @qp: NTB transport layer queue to be enabled
  1100. *
  1101. * Notify NTB transport layer of client readiness to use queue
  1102. */
  1103. void ntb_transport_link_up(struct ntb_transport_qp *qp)
  1104. {
  1105. if (!qp)
  1106. return;
  1107. qp->client_ready = NTB_LINK_UP;
  1108. if (qp->transport->transport_link == NTB_LINK_UP)
  1109. schedule_delayed_work(&qp->link_work, 0);
  1110. }
  1111. EXPORT_SYMBOL_GPL(ntb_transport_link_up);
  1112. /**
  1113. * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
  1114. * @qp: NTB transport layer queue to be disabled
  1115. *
  1116. * Notify the NTB transport layer of the client's desire to no longer receive
  1117. * data on the specified transport queue. It is the client's responsibility to
  1118. * ensure all entries on the queue are purged or otherwise handled appropriately.
  1119. */
  1120. void ntb_transport_link_down(struct ntb_transport_qp *qp)
  1121. {
  1122. struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
  1123. int rc, val;
  1124. if (!qp)
  1125. return;
  1126. qp->client_ready = NTB_LINK_DOWN;
  1127. rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
  1128. if (rc) {
  1129. dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
  1130. return;
  1131. }
  1132. rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
  1133. val & ~(1 << qp->qp_num));
  1134. if (rc)
  1135. dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
  1136. val & ~(1 << qp->qp_num), QP_LINKS);
  1137. if (qp->qp_link == NTB_LINK_UP)
  1138. ntb_send_link_down(qp);
  1139. else
  1140. cancel_delayed_work_sync(&qp->link_work);
  1141. }
  1142. EXPORT_SYMBOL_GPL(ntb_transport_link_down);
  1143. /**
  1144. * ntb_transport_link_query - Query transport link state
  1145. * @qp: NTB transport layer queue to be queried
  1146. *
  1147. * Query connectivity to the remote system of the NTB transport queue
  1148. *
  1149. * RETURNS: true for link up or false for link down
  1150. */
  1151. bool ntb_transport_link_query(struct ntb_transport_qp *qp)
  1152. {
  1153. return qp->qp_link == NTB_LINK_UP;
  1154. }
  1155. EXPORT_SYMBOL_GPL(ntb_transport_link_query);
  1156. /**
  1157. * ntb_transport_qp_num - Query the qp number
  1158. * @qp: NTB transport layer queue to be queried
  1159. *
  1160. * Query qp number of the NTB transport queue
  1161. *
  1162. * RETURNS: a zero based number specifying the qp number
  1163. */
  1164. unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
  1165. {
  1166. return qp->qp_num;
  1167. }
  1168. EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  1169. /**
  1170. * ntb_transport_max_size - Query the max payload size of a qp
  1171. * @qp: NTB transport layer queue to be queried
  1172. *
  1173. * Query the maximum payload size permissible on the given qp
  1174. *
  1175. * RETURNS: the max payload size of a qp
  1176. */
  1177. unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
  1178. {
  1179. return qp->tx_max_frame - sizeof(struct ntb_payload_header);
  1180. }
  1181. EXPORT_SYMBOL_GPL(ntb_transport_max_size);