c2.c

/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);

static struct pci_device_id c2_pci_table[] = {
        { PCI_DEVICE(0x18b8, 0xb001) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
        pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
        struct net_device *netdev = c2_port->netdev;

        if (netdev->mtu > RX_BUF_SIZE)
                c2_port->rx_buf_size =
                    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
                    NET_IP_ALIGN;
        else
                c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_txp_ring)
{
        struct c2_tx_desc *tx_desc;
        struct c2_txp_desc __iomem *txp_desc;
        struct c2_element *elem;
        int i;

        tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
        if (!tx_ring->start)
                return -ENOMEM;

        elem = tx_ring->start;
        tx_desc = vaddr;
        txp_desc = mmio_txp_ring;
        for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
                tx_desc->len = 0;
                tx_desc->status = 0;

                /* Set TXP_HTXD_UNINIT */
                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                             (void __iomem *) txp_desc + C2_TXP_ADDR);
                __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                             (void __iomem *) txp_desc + C2_TXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = tx_desc;
                elem->hw_desc = txp_desc;

                if (i == tx_ring->count - 1) {
                        elem->next = tx_ring->start;
                        tx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        tx_desc->next_offset =
                            base + (i + 1) * sizeof(*tx_desc);
                }
        }

        tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

        return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_rxp_ring)
{
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_desc __iomem *rxp_desc;
        struct c2_element *elem;
        int i;

        rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
        if (!rx_ring->start)
                return -ENOMEM;

        elem = rx_ring->start;
        rx_desc = vaddr;
        rxp_desc = mmio_rxp_ring;
        for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
                rx_desc->len = 0;
                rx_desc->status = 0;

                /* Set RXP_HRXD_UNINIT */
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
                             (void __iomem *) rxp_desc + C2_RXP_STATUS);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             (void __iomem *) rxp_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             (void __iomem *) rxp_desc + C2_RXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = rx_desc;
                elem->hw_desc = rxp_desc;

                if (i == rx_ring->count - 1) {
                        elem->next = rx_ring->start;
                        rx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        rx_desc->next_offset =
                            base + (i + 1) * sizeof(*rx_desc);
                }
        }

        rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

        return 0;
}

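/*
 * The 0x1122334455667788 and 0x99aabbccddeeff values written into the
 * uninitialized TXP/RXP address fields above appear to be sentinel
 * patterns rather than real bus addresses: a descriptor the adapter
 * consumes before it has been armed then shows up unmistakably in a
 * bus trace.
 */
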
/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen;
        struct c2_rxp_hdr *rxp_hdr;

        skb = dev_alloc_skb(c2_port->rx_buf_size);
        if (unlikely(!skb)) {
                pr_debug("%s: out of memory for receive\n",
                         c2_port->netdev->name);
                return -ENOMEM;
        }

        /* Zero out the rxp hdr in the sk_buff */
        memset(skb->data, 0, sizeof(*rxp_hdr));

        skb->dev = c2_port->netdev;

        maplen = c2_port->rx_buf_size;
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen,
                           PCI_DMA_FROMDEVICE);

        /* Set the sk_buff RXP_header to RXP_HRXD_READY */
        rxp_hdr = (struct c2_rxp_hdr *) skb->data;
        rxp_hdr->flags = RXP_HRXD_READY;

        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;
        rx_desc->len = maplen;

        return 0;
}

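/*
 * Note that the length posted to C2_RXP_LEN in c2_rx_alloc() excludes
 * sizeof(struct c2_rxp_hdr): the header occupies the start of the DMA
 * buffer and the adapter writes the frame immediately after it, which
 * is why c2_rx_interrupt() skips those leading bytes before handing
 * the skb to the stack.
 */
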
/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        int ret = 0;

        elem = rx_ring->start;
        do {
                if (c2_rx_alloc(c2_port, elem)) {
                        ret = 1;
                        break;
                }
        } while ((elem = elem->next) != rx_ring->start);

        rx_ring->to_clean = rx_ring->start;
        return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;

        elem = rx_ring->start;
        do {
                rx_desc = elem->ht_desc;
                rx_desc->len = 0;

                __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
                __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
                __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             elem->hw_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             elem->hw_desc + C2_RXP_FLAGS);

                if (elem->skb) {
                        pci_unmap_single(c2dev->pcidev, elem->mapaddr,
                                         elem->maplen, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(elem->skb);
                        elem->skb = NULL;
                }
        } while ((elem = elem->next) != rx_ring->start);
}

static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
        struct c2_tx_desc *tx_desc = elem->ht_desc;

        tx_desc->len = 0;

        pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
                         PCI_DMA_TODEVICE);

        if (elem->skb) {
                dev_kfree_skb_any(elem->skb);
                elem->skb = NULL;
        }

        return 0;
}

/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;
        int retry;
        unsigned long flags;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        elem = tx_ring->start;

        do {
                retry = 0;
                do {
                        txp_htxd.flags =
                            readw(elem->hw_desc + C2_TXP_FLAGS);

                        if (txp_htxd.flags == TXP_HTXD_READY) {
                                retry = 1;
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq(0,
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
                                             elem->hw_desc + C2_TXP_FLAGS);
                                c2_port->netdev->stats.tx_dropped++;
                                break;
                        } else {
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                                             elem->hw_desc + C2_TXP_FLAGS);
                        }

                        c2_tx_free(c2_port->c2dev, elem);

                } while ((elem = elem->next) != tx_ring->start);
        } while (retry);

        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

        if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(c2_port->netdev);

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

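/*
 * On the retry loop in c2_tx_clean() above: a descriptor still flagged
 * TXP_HTXD_READY is still owned by the adapter, so it is forced to
 * TXP_HTXD_DONE (and counted as tx_dropped) and the whole ring is
 * walked again; the scan ends only once a full pass finds no READY
 * entries.
 */
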
/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;

        spin_lock(&c2_port->tx_lock);

        for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
             elem = elem->next) {
                txp_htxd.flags =
                    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

                if (txp_htxd.flags != TXP_HTXD_DONE)
                        break;

                if (netif_msg_tx_done(c2_port)) {
                        /* PCI reads are expensive in fast path */
                        txp_htxd.len =
                            be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
                        pr_debug("%s: tx done slot %3zu status 0x%x len "
                                 "%5u bytes\n",
                                 netdev->name, elem - tx_ring->start,
                                 txp_htxd.flags, txp_htxd.len);
                }

                c2_tx_free(c2dev, elem);
                ++(c2_port->tx_avail);
        }

        tx_ring->to_clean = elem;

        if (netif_queue_stopped(netdev)
            && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(netdev);

        spin_unlock(&c2_port->tx_lock);
}

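/*
 * The wake-queue checks here and in c2_tx_clean() use the same
 * threshold: the queue is restarted only when tx_avail exceeds
 * MAX_SKB_FRAGS + 1, i.e. when even a maximally fragmented skb (linear
 * head plus MAX_SKB_FRAGS page fragments) is guaranteed a descriptor
 * for each piece.
 */
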
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

        if (rxp_hdr->status != RXP_HRXD_OK ||
            rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
                pr_debug("BAD RXP_HRXD\n");
                pr_debug("  rx_desc : %p\n", rx_desc);
                pr_debug("    index : %zu\n",
                         elem - c2_port->rx_ring.start);
                pr_debug("    len   : %u\n", rx_desc->len);
                pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
                         (void *) __pa((unsigned long) rxp_hdr));
                pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
                pr_debug("    status: 0x%x\n", rxp_hdr->status);
                pr_debug("    len   : %u\n", rxp_hdr->len);
                pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
        }

        /* Setup the skb for reuse since we're dropping this pkt */
        elem->skb->data = elem->skb->head;
        skb_reset_tail_pointer(elem->skb);

        /* Zero out the rxp hdr in the sk_buff */
        memset(elem->skb->data, 0, sizeof(*rxp_hdr));

        /* Write the descriptor to the adapter's rx ring */
        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
        __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
                     elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        pr_debug("packet dropped\n");
        c2_port->netdev->stats.rx_dropped++;
}

static void c2_rx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_hdr *rxp_hdr;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen, buflen;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->lock, flags);

        /* Begin where we left off */
        rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

        for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
             elem = elem->next) {
                rx_desc = elem->ht_desc;
                mapaddr = elem->mapaddr;
                maplen = elem->maplen;
                skb = elem->skb;
                rxp_hdr = (struct c2_rxp_hdr *) skb->data;

                if (rxp_hdr->flags != RXP_HRXD_DONE)
                        break;
                buflen = rxp_hdr->len;

                /* Sanity check the RXP header */
                if (rxp_hdr->status != RXP_HRXD_OK ||
                    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /*
                 * Allocate and map a new skb for replenishing the host
                 * RX desc
                 */
                if (c2_rx_alloc(c2_port, elem)) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /* Unmap the old skb */
                pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
                                 PCI_DMA_FROMDEVICE);

                prefetch(skb->data);

                /*
                 * Skip past the leading 8 bytes comprising the
                 * "struct c2_rxp_hdr", prepended by the adapter
                 * to the usual Ethernet header ("struct ethhdr"),
                 * to the start of the raw Ethernet packet.
                 *
                 * Fix up the various fields in the sk_buff before
                 * passing it up to netif_rx().  The transfer size
                 * (in bytes) specified by the adapter len field of
                 * the "struct rxp_hdr_t" does NOT include the
                 * "sizeof(struct c2_rxp_hdr)".
                 */
                skb->data += sizeof(*rxp_hdr);
                skb_set_tail_pointer(skb, buflen);
                skb->len = buflen;
                skb->protocol = eth_type_trans(skb, netdev);

                netif_rx(skb);

                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += buflen;
        }

        /* Save where we left off */
        rx_ring->to_clean = elem;
        c2dev->cur_rx = elem - rx_ring->start;
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        spin_unlock_irqrestore(&c2dev->lock, flags);
}

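/*
 * The loop condition above (elem->next != rx_ring->to_clean) stops one
 * element short of a full ring revolution, so a single pass can never
 * consume every descriptor; the normal exit is the RXP_HRXD_DONE
 * check, and the final index is handed back to the adapter via
 * C2_SET_CUR_RX().
 */
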
/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
        unsigned int netisr0, dmaisr;
        int handled = 0;
        struct c2_dev *c2dev = (struct c2_dev *) dev_id;

        /* Process CCILNET interrupts */
        netisr0 = readl(c2dev->regs + C2_NISR0);
        if (netisr0) {

                /*
                 * There is an issue with the firmware that always
                 * provides the status of RX for both TX & RX
                 * interrupts.  So process both queues here.
                 */
                c2_rx_interrupt(c2dev->netdev);
                c2_tx_interrupt(c2dev->netdev);

                /* Clear the interrupt */
                writel(netisr0, c2dev->regs + C2_NISR0);
                handled++;
        }

        /* Process RNIC interrupts */
        dmaisr = readl(c2dev->regs + C2_DISR);
        if (dmaisr) {
                writel(dmaisr, c2dev->regs + C2_DISR);
                c2_rnic_interrupt(c2dev);
                handled++;
        }

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static int c2_up(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_element *elem;
        struct c2_rxp_hdr *rxp_hdr;
        struct in_device *in_dev;
        size_t rx_size, tx_size;
        int ret, i;
        unsigned int netimr0;

        if (netif_msg_ifup(c2_port))
                pr_debug("%s: enabling interface\n", netdev->name);

        /* Set the Rx buffer size based on MTU */
        c2_set_rxbufsize(c2_port);

        /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
        rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
        tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

        c2_port->mem_size = tx_size + rx_size;
        c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
                                            &c2_port->dma);
        if (c2_port->mem == NULL) {
                pr_debug("Unable to allocate memory for "
                         "host descriptor rings\n");
                return -ENOMEM;
        }

        memset(c2_port->mem, 0, c2_port->mem_size);

        /* Create the Rx host descriptor ring */
        if ((ret =
             c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
                              c2dev->mmio_rxp_ring))) {
                pr_debug("Unable to create RX ring\n");
                goto bail0;
        }

        /* Allocate Rx buffers for the host descriptor ring */
        if (c2_rx_fill(c2_port)) {
                pr_debug("Unable to fill RX ring\n");
                goto bail1;
        }

        /* Create the Tx host descriptor ring */
        if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
                                    c2_port->dma + rx_size,
                                    c2dev->mmio_txp_ring))) {
                pr_debug("Unable to create TX ring\n");
                goto bail1;
        }

        /* Set the TX pointer to where we left off */
        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
            c2_port->tx_ring.start + c2dev->cur_tx;

        /* missing: Initialize MAC */

        BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
        for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
             i++, elem++) {
                rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
                rxp_hdr->flags = 0;
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                             elem->hw_desc + C2_RXP_FLAGS);
        }

        /* Enable network packets */
        netif_start_queue(netdev);

        /* Enable IRQ */
        writel(0, c2dev->regs + C2_IDIS);
        netimr0 = readl(c2dev->regs + C2_NIMR0);
        netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
        writel(netimr0, c2dev->regs + C2_NIMR0);

        /* Tell the stack to ignore arp requests for ipaddrs bound to
         * other interfaces.  This is needed to prevent the host stack
         * from responding to arp requests to the ipaddr bound on the
         * rdma interface.
         */
        in_dev = in_dev_get(netdev);
        IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
        in_dev_put(in_dev);

        return 0;

bail1:
        c2_rx_clean(c2_port);
        kfree(c2_port->rx_ring.start);

bail0:
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return ret;
}

static int c2_down(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;

        if (netif_msg_ifdown(c2_port))
                pr_debug("%s: disabling interface\n",
                         netdev->name);

        /* Wait for all the queued packets to get sent */
        c2_tx_interrupt(netdev);

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Disable IRQs by clearing the interrupt mask */
        writel(1, c2dev->regs + C2_IDIS);
        writel(0, c2dev->regs + C2_NIMR0);

        /* missing: Stop transmitter */

        /* missing: Stop receiver */

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* missing: Turn off LEDs here */

        /* Free all buffers in the host descriptor rings */
        c2_tx_clean(c2_port);
        c2_rx_clean(c2_port);

        /* Free the host descriptor rings */
        kfree(c2_port->rx_ring.start);
        kfree(c2_port->tx_ring.start);
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return 0;
}

static void c2_reset(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        unsigned int cur_rx = c2dev->cur_rx;

        /* Tell the hardware to quiesce */
        C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

        /*
         * The hardware will reset the C2_PCI_HRX_QUI bit once
         * the RXP is quiesced.  Wait 2 seconds for this.
         */
        ssleep(2);

        cur_rx = C2_GET_CUR_RX(c2dev);

        if (cur_rx & C2_PCI_HRX_QUI)
                pr_debug("c2_reset: failed to quiesce the hardware!\n");

        cur_rx &= ~C2_PCI_HRX_QUI;

        c2dev->cur_rx = cur_rx;

        pr_debug("Current RX: %u\n", c2dev->cur_rx);
}

static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        dma_addr_t mapaddr;
        u32 maplen;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&c2_port->tx_lock, flags);

                pr_debug("%s: Tx ring full when queue awake!\n",
                         netdev->name);
                return NETDEV_TX_BUSY;
        }

        maplen = skb_headlen(skb);
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

        elem = tx_ring->to_use;
        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;

        /* Tell HW to xmit */
        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                     elem->hw_desc + C2_TXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(maplen),
                     elem->hw_desc + C2_TXP_LEN);
        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                     elem->hw_desc + C2_TXP_FLAGS);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += maplen;

        /* Loop thru additional data fragments and queue them */
        if (skb_shinfo(skb)->nr_frags) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        maplen = frag->size;
                        mapaddr =
                            pci_map_page(c2dev->pcidev, frag->page,
                                         frag->page_offset, maplen,
                                         PCI_DMA_TODEVICE);

                        elem = elem->next;
                        elem->skb = NULL;
                        elem->mapaddr = mapaddr;
                        elem->maplen = maplen;

                        /* Tell HW to xmit */
                        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                                     elem->hw_desc + C2_TXP_ADDR);
                        __raw_writew((__force u16) cpu_to_be16(maplen),
                                     elem->hw_desc + C2_TXP_LEN);
                        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                                     elem->hw_desc + C2_TXP_FLAGS);

                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += maplen;
                }
        }

        tx_ring->to_use = elem->next;
        c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

        if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
                netif_stop_queue(netdev);
                if (netif_msg_tx_queued(c2_port))
                        pr_debug("%s: transmit queue full\n",
                                 netdev->name);
        }

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);

        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

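/*
 * Each transmitted skb consumes nr_frags + 1 descriptors in
 * c2_xmit_frame() above: one for the linear head (pci_map_single) and
 * one per page fragment (pci_map_page).  Only the head descriptor keeps
 * a pointer to the skb, so c2_tx_free() frees the skb exactly once
 * while still unmapping every fragment's DMA buffer.
 */
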
static void c2_tx_timeout(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);

        if (netif_msg_timer(c2_port))
                pr_debug("%s: tx timeout\n", netdev->name);

        c2_tx_clean(c2_port);
}

static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
        int ret = 0;

        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
                return -EINVAL;

        netdev->mtu = new_mtu;

        if (netif_running(netdev)) {
                c2_down(netdev);
                c2_up(netdev);
        }

        return ret;
}

static const struct net_device_ops c2_netdev = {
        .ndo_open            = c2_up,
        .ndo_stop            = c2_down,
        .ndo_start_xmit      = c2_xmit_frame,
        .ndo_tx_timeout      = c2_tx_timeout,
        .ndo_change_mtu      = c2_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
};

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
                                     void __iomem * mmio_addr)
{
        struct c2_port *c2_port = NULL;
        struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

        if (!netdev) {
                pr_debug("c2_port etherdev alloc failed");
                return NULL;
        }

        SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

        netdev->netdev_ops = &c2_netdev;
        netdev->watchdog_timeo = C2_TX_TIMEOUT;
        netdev->irq = c2dev->pcidev->irq;

        c2_port = netdev_priv(netdev);
        c2_port->netdev = netdev;
        c2_port->c2dev = c2dev;
        c2_port->msg_enable = netif_msg_init(debug, default_msg);
        c2_port->tx_ring.count = C2_NUM_TX_DESC;
        c2_port->rx_ring.count = C2_NUM_RX_DESC;

        spin_lock_init(&c2_port->tx_lock);

        /* Copy our 48-bit ethernet hardware address */
        memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

        /* Validate the MAC address */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_debug("Invalid MAC Address\n");
                c2_print_macaddr(netdev);
                free_netdev(netdev);
                return NULL;
        }

        c2dev->netdev = netdev;

        return netdev;
}

static int __devinit c2_probe(struct pci_dev *pcidev,
                              const struct pci_device_id *ent)
{
        int ret = 0, i;
        unsigned long reg0_start, reg0_flags, reg0_len;
        unsigned long reg2_start, reg2_flags, reg2_len;
        unsigned long reg4_start, reg4_flags, reg4_len;
        unsigned kva_map_size;
        struct net_device *netdev = NULL;
        struct c2_dev *c2dev = NULL;
        void __iomem *mmio_regs = NULL;

        printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
                DRV_VERSION);

        /* Enable PCI device */
        ret = pci_enable_device(pcidev);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
                        pci_name(pcidev));
                goto bail0;
        }

        reg0_start = pci_resource_start(pcidev, BAR_0);
        reg0_len = pci_resource_len(pcidev, BAR_0);
        reg0_flags = pci_resource_flags(pcidev, BAR_0);

        reg2_start = pci_resource_start(pcidev, BAR_2);
        reg2_len = pci_resource_len(pcidev, BAR_2);
        reg2_flags = pci_resource_flags(pcidev, BAR_2);

        reg4_start = pci_resource_start(pcidev, BAR_4);
        reg4_len = pci_resource_len(pcidev, BAR_4);
        reg4_flags = pci_resource_flags(pcidev, BAR_4);

        pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
        pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
        pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

        /* Make sure PCI base addr are MMIO */
        if (!(reg0_flags & IORESOURCE_MEM) ||
            !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Check for weird/broken PCI region reporting */
        if ((reg0_len < C2_REG0_SIZE) ||
            (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
                printk(KERN_ERR PFX "Invalid PCI region sizes\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Reserve PCI I/O and memory resources */
        ret = pci_request_regions(pcidev, DRV_NAME);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to request regions\n",
                        pci_name(pcidev));
                goto bail1;
        }

        if ((sizeof(dma_addr_t) > 4)) {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
                if (ret < 0) {
                        printk(KERN_ERR PFX "64b DMA configuration failed\n");
                        goto bail2;
                }
        } else {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
                if (ret < 0) {
                        printk(KERN_ERR PFX "32b DMA configuration failed\n");
                        goto bail2;
                }
        }

        /* Enables bus-mastering on the device */
        pci_set_master(pcidev);

        /* Remap the adapter PCI registers in BAR4 */
        mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                    sizeof(struct c2_adapter_pci_regs));
        if (!mmio_regs) {
                printk(KERN_ERR PFX
                        "Unable to remap adapter PCI registers in BAR4\n");
                ret = -EIO;
                goto bail2;
        }

        /* Validate PCI regs magic */
        for (i = 0; i < sizeof(c2_magic); i++) {
                if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
                        printk(KERN_ERR PFX "Downlevel Firmware boot loader "
                                "[%d/%zd: got 0x%x, exp 0x%x]. Use the cc_flash "
                                "utility to update your boot loader\n",
                                i + 1, sizeof(c2_magic),
                                readb(mmio_regs + C2_REGS_MAGIC + i),
                                c2_magic[i]);
                        printk(KERN_ERR PFX "Adapter not claimed\n");
                        iounmap(mmio_regs);
                        ret = -EIO;
                        goto bail2;
                }
        }

        /* Validate the adapter version */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
                printk(KERN_ERR PFX "Version mismatch "
                        "[fw=%u, c2=%u], Adapter not claimed\n",
                        be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
                        C2_VERSION);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Validate the adapter IVN */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
                printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
                        "the OpenIB device support kit. "
                        "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
                        be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
                        C2_IVN);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Allocate hardware structure */
        c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
        if (!c2dev) {
                printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
                        pci_name(pcidev));
                ret = -ENOMEM;
                iounmap(mmio_regs);
                goto bail2;
        }

        memset(c2dev, 0, sizeof(*c2dev));
        spin_lock_init(&c2dev->lock);
        c2dev->pcidev = pcidev;
        c2dev->cur_tx = 0;

        /*
         * Get the last RX index.  C2_REGS_HRX_CUR holds an adapter-side
         * address within the HRXDQ; subtracting what is evidently the
         * adapter's ring base (0xffffc000) and dividing by the descriptor
         * size converts it into a host ring index.
         */
        c2dev->cur_rx =
            (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
             0xffffc000) / sizeof(struct c2_rxp_desc);

        /* Request an interrupt line for the driver */
        ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
        if (ret) {
                printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
                        pci_name(pcidev), pcidev->irq);
                iounmap(mmio_regs);
                goto bail3;
        }

        /* Set driver specific data */
        pci_set_drvdata(pcidev, c2dev);

        /* Initialize network device */
        if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
                iounmap(mmio_regs);
                goto bail4;
        }

        /* Save off the actual size prior to unmapping mmio_regs */
        kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

        /* Unmap the adapter PCI registers in BAR4 */
        iounmap(mmio_regs);

        /* Register network device */
        ret = register_netdev(netdev);
        if (ret) {
                printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
                        ret);
                goto bail5;
        }

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Remap the adapter HRXDQ PA space to kernel VA space */
        c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
                                               C2_RXP_HRXDQ_SIZE);
        if (!c2dev->mmio_rxp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
                ret = -EIO;
                goto bail6;
        }

        /* Remap the adapter HTXDQ PA space to kernel VA space */
        c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
                                               C2_TXP_HTXDQ_SIZE);
        if (!c2dev->mmio_txp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
                ret = -EIO;
                goto bail7;
        }

        /* Save off the current RX index in the last 4 bytes of the TXP Ring */
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        /* Remap the PCI registers in adapter BAR0 to kernel VA space */
        c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
        if (!c2dev->regs) {
                printk(KERN_ERR PFX "Unable to remap BAR0\n");
                ret = -EIO;
                goto bail8;
        }

        /* Remap the PCI registers in adapter BAR4 to kernel VA space */
        c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
        c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                     kva_map_size);
        if (!c2dev->kva) {
                printk(KERN_ERR PFX "Unable to remap BAR4\n");
                ret = -EIO;
                goto bail9;
        }

        /* Print out the MAC address */
        c2_print_macaddr(netdev);

        ret = c2_rnic_init(c2dev);
        if (ret) {
                printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
                goto bail10;
        }

        if (c2_register_device(c2dev))
                goto bail10;

        return 0;

bail10:
        iounmap(c2dev->kva);

bail9:
        iounmap(c2dev->regs);

bail8:
        iounmap(c2dev->mmio_txp_ring);

bail7:
        iounmap(c2dev->mmio_rxp_ring);

bail6:
        unregister_netdev(netdev);

bail5:
        free_netdev(netdev);

bail4:
        free_irq(pcidev->irq, c2dev);

bail3:
        ib_dealloc_device(&c2dev->ibdev);

bail2:
        pci_release_regions(pcidev);

bail1:
        pci_disable_device(pcidev);

bail0:
        return ret;
}

static void __devexit c2_remove(struct pci_dev *pcidev)
{
        struct c2_dev *c2dev = pci_get_drvdata(pcidev);
        struct net_device *netdev = c2dev->netdev;

        /* Unregister with OpenIB */
        c2_unregister_device(c2dev);

        /* Clean up the RNIC resources */
        c2_rnic_term(c2dev);

        /* Remove network device from the kernel */
        unregister_netdev(netdev);

        /* Free network device */
        free_netdev(netdev);

        /* Free the interrupt line */
        free_irq(pcidev->irq, c2dev);

        /* missing: Turn LEDs off here */

        /* Unmap adapter PA space */
        iounmap(c2dev->kva);
        iounmap(c2dev->regs);
        iounmap(c2dev->mmio_txp_ring);
        iounmap(c2dev->mmio_rxp_ring);

        /* Free the hardware structure */
        ib_dealloc_device(&c2dev->ibdev);

        /* Release reserved PCI I/O and memory resources */
        pci_release_regions(pcidev);

        /* Disable PCI device */
        pci_disable_device(pcidev);

        /* Clear driver specific data */
        pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
        .name = DRV_NAME,
        .id_table = c2_pci_table,
        .probe = c2_probe,
        .remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
        return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
        pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);