/* rionet.c */
  1. /*
  2. * rionet - Ethernet driver over RapidIO messaging services
  3. *
  4. * Copyright 2005 MontaVista Software, Inc.
  5. * Matt Porter <mporter@kernel.crashing.org>
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms of the GNU General Public License as published by the
  9. * Free Software Foundation; either version 2 of the License, or (at your
  10. * option) any later version.
  11. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
/* Module identity */
#define DRV_NAME "rionet"
#define DRV_VERSION "0.2"
#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC "Ethernet over RapidIO"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

/* Default netif message-level bitmask; tunable at runtime via ethtool. */
#define RIONET_DEFAULT_MSGLEVEL \
(NETIF_MSG_DRV | \
NETIF_MSG_LINK | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)

/* Doorbell info values forming the peer join/leave handshake. */
#define RIONET_DOORBELL_JOIN 0x1000
#define RIONET_DOORBELL_LEAVE 0x1001

/* Single mailbox channel used for all message traffic. */
#define RIONET_MAILBOX 0

/*
 * Ring sizes come from Kconfig.  The TX ring size must be a power of
 * two: tx_slot/ack_slot are wrapped with "& (RIONET_TX_RING_SIZE - 1)".
 */
#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
/* All rionet-capable remote devices discovered at probe time. */
static LIST_HEAD(rionet_peers);

/* Per-net_device state for one RapidIO master port. */
struct rionet_private {
	struct rio_mport *mport;	/* underlying RapidIO master port */
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];	/* posted inbound buffers */
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];	/* in-flight outbound skbs */
	int rx_slot;	/* next RX ring slot to refill */
	int tx_slot;	/* next TX ring slot to use */
	int tx_cnt;	/* outstanding (not yet acked) TX messages */
	int ack_slot;	/* oldest TX slot awaiting completion */
	spinlock_t lock;	/* protects RX ring and TX completion path */
	spinlock_t tx_lock;	/* serializes the transmit path */
	u32 msg_enable;	/* netif message-level bitmask (ethtool) */
};
/* One remote rionet-capable device on the global peer list. */
struct rionet_peer {
	struct list_head node;	/* link on rionet_peers */
	struct rio_dev *rdev;	/* the remote RapidIO device */
	struct resource *res;	/* outbound doorbell range; NULL if request failed */
};
static int rionet_check = 0;	/* set once the first probe has vetted the local port */
static int rionet_capable = 1;	/* cleared when the local port lacks mbox/doorbell support */

/*
 * This is a fast lookup table for translating TX
 * Ethernet packets into a destination RIO device. It
 * could be made into a hash table to save memory depending
 * on system trade-offs.
 */
static struct rio_dev **rionet_active;
  68. #define is_rionet_capable(pef, src_ops, dst_ops) \
  69. ((pef & RIO_PEF_INB_MBOX) && \
  70. (pef & RIO_PEF_INB_DOORBELL) && \
  71. (src_ops & RIO_SRC_OPS_DOORBELL) && \
  72. (dst_ops & RIO_DST_OPS_DOORBELL))
  73. #define dev_rionet_capable(dev) \
  74. is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
  75. #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
  76. #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
/*
 * Drain completed inbound messages into the network stack.
 *
 * Walks the RX ring starting at rnet->rx_slot; for each slot holding a
 * pre-posted skb, pulls the next inbound message buffer from the mport
 * and passes the skb to netif_rx().  Stops early when the mport has no
 * more messages.  Returns the ring index one past the last slot drained;
 * the caller uses it as the refill boundary for rionet_rx_fill().
 *
 * Caller must hold rnet->lock.
 *
 * NOTE(review): rx_skb[i]->data is overwritten with the buffer returned
 * by rio_get_inb_message() -- this relies on the mport returning buffers
 * in the same order they were posted via rio_add_inb_buffer(); confirm
 * against the mport driver.
 */
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		/* Slot was never filled (earlier allocation failure): skip. */
		if (!rnet->rx_skb[i])
			continue;

		/* No more pending inbound messages. */
		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else if (error == NET_RX_BAD) {
			if (netif_msg_rx_err(rnet))
				printk(KERN_WARNING "%s: bad rx packet\n",
				       DRV_NAME);
			ndev->stats.rx_errors++;
		} else {
			ndev->stats.rx_packets++;
			/* Counts the RIO message size, not the frame length. */
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}
  108. static void rionet_rx_fill(struct net_device *ndev, int end)
  109. {
  110. int i;
  111. struct rionet_private *rnet = netdev_priv(ndev);
  112. i = rnet->rx_slot;
  113. do {
  114. rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
  115. if (!rnet->rx_skb[i])
  116. break;
  117. rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
  118. rnet->rx_skb[i]->data);
  119. } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
  120. rnet->rx_slot = i;
  121. }
  122. static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
  123. struct rio_dev *rdev)
  124. {
  125. struct rionet_private *rnet = netdev_priv(ndev);
  126. rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
  127. rnet->tx_skb[rnet->tx_slot] = skb;
  128. ndev->stats.tx_packets++;
  129. ndev->stats.tx_bytes += skb->len;
  130. if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
  131. netif_stop_queue(ndev);
  132. ++rnet->tx_slot;
  133. rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
  134. if (netif_msg_tx_queued(rnet))
  135. printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
  136. (u32) skb, skb->len);
  137. return 0;
  138. }
  139. static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  140. {
  141. int i;
  142. struct rionet_private *rnet = netdev_priv(ndev);
  143. struct ethhdr *eth = (struct ethhdr *)skb->data;
  144. u16 destid;
  145. unsigned long flags;
  146. local_irq_save(flags);
  147. if (!spin_trylock(&rnet->tx_lock)) {
  148. local_irq_restore(flags);
  149. return NETDEV_TX_LOCKED;
  150. }
  151. if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
  152. netif_stop_queue(ndev);
  153. spin_unlock_irqrestore(&rnet->tx_lock, flags);
  154. printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
  155. ndev->name);
  156. return NETDEV_TX_BUSY;
  157. }
  158. if (eth->h_dest[0] & 0x01) {
  159. for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
  160. i++)
  161. if (rionet_active[i])
  162. rionet_queue_tx_msg(skb, ndev,
  163. rionet_active[i]);
  164. } else if (RIONET_MAC_MATCH(eth->h_dest)) {
  165. destid = RIONET_GET_DESTID(eth->h_dest);
  166. if (rionet_active[destid])
  167. rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
  168. }
  169. spin_unlock_irqrestore(&rnet->tx_lock, flags);
  170. return 0;
  171. }
  172. static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
  173. u16 info)
  174. {
  175. struct net_device *ndev = dev_id;
  176. struct rionet_private *rnet = netdev_priv(ndev);
  177. struct rionet_peer *peer;
  178. if (netif_msg_intr(rnet))
  179. printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
  180. DRV_NAME, sid, tid, info);
  181. if (info == RIONET_DOORBELL_JOIN) {
  182. if (!rionet_active[sid]) {
  183. list_for_each_entry(peer, &rionet_peers, node) {
  184. if (peer->rdev->destid == sid)
  185. rionet_active[sid] = peer->rdev;
  186. }
  187. rio_mport_send_doorbell(mport, sid,
  188. RIONET_DOORBELL_JOIN);
  189. }
  190. } else if (info == RIONET_DOORBELL_LEAVE) {
  191. rionet_active[sid] = NULL;
  192. } else {
  193. if (netif_msg_intr(rnet))
  194. printk(KERN_WARNING "%s: unhandled doorbell\n",
  195. DRV_NAME);
  196. }
  197. }
  198. static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
  199. {
  200. int n;
  201. struct net_device *ndev = dev_id;
  202. struct rionet_private *rnet = netdev_priv(ndev);
  203. if (netif_msg_intr(rnet))
  204. printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
  205. DRV_NAME, mbox, slot);
  206. spin_lock(&rnet->lock);
  207. if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
  208. rionet_rx_fill(ndev, n);
  209. spin_unlock(&rnet->lock);
  210. }
/*
 * Outbound mailbox completion handler: frees acknowledged skbs from
 * ack_slot up to (but not including) 'slot' and re-wakes the TX queue.
 *
 * NOTE(review): taken with plain spin_lock() -- if this callback and the
 * other users of rnet->lock can run in different interrupt contexts, an
 * irqsave variant may be required; confirm the mport driver's calling
 * context.
 */
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	/* Retire completed slots; the hardware reports the next free slot. */
	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);	/* power-of-two wrap */
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->lock);
}
  232. static int rionet_open(struct net_device *ndev)
  233. {
  234. int i, rc = 0;
  235. struct rionet_peer *peer, *tmp;
  236. u32 pwdcsr;
  237. struct rionet_private *rnet = netdev_priv(ndev);
  238. if (netif_msg_ifup(rnet))
  239. printk(KERN_INFO "%s: open\n", DRV_NAME);
  240. if ((rc = rio_request_inb_dbell(rnet->mport,
  241. (void *)ndev,
  242. RIONET_DOORBELL_JOIN,
  243. RIONET_DOORBELL_LEAVE,
  244. rionet_dbell_event)) < 0)
  245. goto out;
  246. if ((rc = rio_request_inb_mbox(rnet->mport,
  247. (void *)ndev,
  248. RIONET_MAILBOX,
  249. RIONET_RX_RING_SIZE,
  250. rionet_inb_msg_event)) < 0)
  251. goto out;
  252. if ((rc = rio_request_outb_mbox(rnet->mport,
  253. (void *)ndev,
  254. RIONET_MAILBOX,
  255. RIONET_TX_RING_SIZE,
  256. rionet_outb_msg_event)) < 0)
  257. goto out;
  258. /* Initialize inbound message ring */
  259. for (i = 0; i < RIONET_RX_RING_SIZE; i++)
  260. rnet->rx_skb[i] = NULL;
  261. rnet->rx_slot = 0;
  262. rionet_rx_fill(ndev, 0);
  263. rnet->tx_slot = 0;
  264. rnet->tx_cnt = 0;
  265. rnet->ack_slot = 0;
  266. netif_carrier_on(ndev);
  267. netif_start_queue(ndev);
  268. list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
  269. if (!(peer->res = rio_request_outb_dbell(peer->rdev,
  270. RIONET_DOORBELL_JOIN,
  271. RIONET_DOORBELL_LEAVE)))
  272. {
  273. printk(KERN_ERR "%s: error requesting doorbells\n",
  274. DRV_NAME);
  275. continue;
  276. }
  277. /*
  278. * If device has initialized inbound doorbells,
  279. * send a join message
  280. */
  281. rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
  282. if (pwdcsr & RIO_DOORBELL_AVAIL)
  283. rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
  284. }
  285. out:
  286. return rc;
  287. }
  288. static int rionet_close(struct net_device *ndev)
  289. {
  290. struct rionet_private *rnet = netdev_priv(ndev);
  291. struct rionet_peer *peer, *tmp;
  292. int i;
  293. if (netif_msg_ifup(rnet))
  294. printk(KERN_INFO "%s: close\n", DRV_NAME);
  295. netif_stop_queue(ndev);
  296. netif_carrier_off(ndev);
  297. for (i = 0; i < RIONET_RX_RING_SIZE; i++)
  298. kfree_skb(rnet->rx_skb[i]);
  299. list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
  300. if (rionet_active[peer->rdev->destid]) {
  301. rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
  302. rionet_active[peer->rdev->destid] = NULL;
  303. }
  304. rio_release_outb_dbell(peer->rdev, peer->res);
  305. }
  306. rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
  307. RIONET_DOORBELL_LEAVE);
  308. rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
  309. rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
  310. return 0;
  311. }
/*
 * Driver remove callback: free the destid lookup table, tear down the
 * net_device, and empty the peer list.
 *
 * BUG(review): ndev is initialized to NULL and never looked up, so
 * unregister_netdev(NULL) will dereference a NULL pointer.  The
 * net_device created in rionet_setup_netdev() needs to be stored
 * somewhere retrievable (e.g. rio driver data) and fetched here.
 * Also, a net_device must be released with free_netdev(), not kfree().
 */
static void rionet_remove(struct rio_dev *rdev)
{
	struct net_device *ndev = NULL;
	struct rionet_peer *peer, *tmp;

	/* Order must match the __get_free_pages() call in setup. */
	free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
		   __ilog2(sizeof(void *)) + 4 : 0);
	unregister_netdev(ndev);
	kfree(ndev);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		list_del(&peer->node);
		kfree(peer);
	}
}
  325. static void rionet_get_drvinfo(struct net_device *ndev,
  326. struct ethtool_drvinfo *info)
  327. {
  328. struct rionet_private *rnet = netdev_priv(ndev);
  329. strcpy(info->driver, DRV_NAME);
  330. strcpy(info->version, DRV_VERSION);
  331. strcpy(info->fw_version, "n/a");
  332. strcpy(info->bus_info, rnet->mport->name);
  333. }
  334. static u32 rionet_get_msglevel(struct net_device *ndev)
  335. {
  336. struct rionet_private *rnet = netdev_priv(ndev);
  337. return rnet->msg_enable;
  338. }
  339. static void rionet_set_msglevel(struct net_device *ndev, u32 value)
  340. {
  341. struct rionet_private *rnet = netdev_priv(ndev);
  342. rnet->msg_enable = value;
  343. }
/* ethtool operations: drvinfo, msglevel get/set, and generic link state. */
static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};
/* net_device operations; MTU/address handling uses the generic Ethernet helpers. */
static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open = rionet_open,
	.ndo_stop = rionet_close,
	.ndo_start_xmit = rionet_start_xmit,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
  358. static int rionet_setup_netdev(struct rio_mport *mport)
  359. {
  360. int rc = 0;
  361. struct net_device *ndev = NULL;
  362. struct rionet_private *rnet;
  363. u16 device_id;
  364. /* Allocate our net_device structure */
  365. ndev = alloc_etherdev(sizeof(struct rionet_private));
  366. if (ndev == NULL) {
  367. printk(KERN_INFO "%s: could not allocate ethernet device.\n",
  368. DRV_NAME);
  369. rc = -ENOMEM;
  370. goto out;
  371. }
  372. rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
  373. mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0);
  374. if (!rionet_active) {
  375. rc = -ENOMEM;
  376. goto out;
  377. }
  378. memset((void *)rionet_active, 0, sizeof(void *) *
  379. RIO_MAX_ROUTE_ENTRIES(mport->sys_size));
  380. /* Set up private area */
  381. rnet = netdev_priv(ndev);
  382. rnet->mport = mport;
  383. /* Set the default MAC address */
  384. device_id = rio_local_get_device_id(mport);
  385. ndev->dev_addr[0] = 0x00;
  386. ndev->dev_addr[1] = 0x01;
  387. ndev->dev_addr[2] = 0x00;
  388. ndev->dev_addr[3] = 0x01;
  389. ndev->dev_addr[4] = device_id >> 8;
  390. ndev->dev_addr[5] = device_id & 0xff;
  391. ndev->netdev_ops = &rionet_netdev_ops;
  392. ndev->mtu = RIO_MAX_MSG_SIZE - 14;
  393. ndev->features = NETIF_F_LLTX;
  394. SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
  395. spin_lock_init(&rnet->lock);
  396. spin_lock_init(&rnet->tx_lock);
  397. rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
  398. rc = register_netdev(ndev);
  399. if (rc != 0)
  400. goto out;
  401. printk("%s: %s %s Version %s, MAC %pM\n",
  402. ndev->name,
  403. DRV_NAME,
  404. DRV_DESC,
  405. DRV_VERSION,
  406. ndev->dev_addr);
  407. out:
  408. return rc;
  409. }
/*
 * XXX Make multi-net safe
 */
/*
 * Probe callback, invoked by the RapidIO core for each matched device.
 *
 * The first probe verifies that the *local* port supports inbound
 * mailboxes/doorbells and creates the single rionet net_device;
 * rionet_check/rionet_capable latch the result so later probes skip it.
 * Every rionet-capable remote device is appended to the global peer list.
 *
 * NOTE(review): on probes after the first, rc is never set to 0, so the
 * core sees -ENODEV even though the peer was added to rionet_peers --
 * those devices stay unbound while referenced by the list.  Verify this
 * is intentional before relying on remove() ordering.
 */
static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	int rc = -ENODEV;
	u32 lpef, lsrc_ops, ldst_ops;
	struct rionet_peer *peer;

	/* If local device is not rionet capable, give up quickly */
	if (!rionet_capable)
		goto out;

	/*
	 * First time through, make sure local device is rionet
	 * capable, setup netdev, and set flags so this is skipped
	 * on later probes
	 */
	if (!rionet_check) {
		rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device is not network capable\n",
			       DRV_NAME);
			rionet_check = 1;
			rionet_capable = 0;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport);
		rionet_check = 1;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		list_add_tail(&peer->node, &rionet_peers);
	}

out:
	return rc;
}
  458. static struct rio_device_id rionet_id_table[] = {
  459. {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
  460. };
/* RapidIO driver registration glue. */
static struct rio_driver rionet_driver = {
	.name = "rionet",
	.id_table = rionet_id_table,
	.probe = rionet_probe,
	.remove = rionet_remove,
};
/* Module init: register with the RapidIO core. */
static int __init rionet_init(void)
{
	return rio_register_driver(&rionet_driver);
}

/* Module exit: unregister from the RapidIO core. */
static void __exit rionet_exit(void)
{
	rio_unregister_driver(&rionet_driver);
}

module_init(rionet_init);
module_exit(rionet_exit);