/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"

#define DRV_MODULE_NAME		"sunvnet"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}
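
/* Send our ATTR INFO message during the handshake: transfer mode,
 * address type, MAC address and MTU.  The peer replies with ACK or
 * NACK, and handle_attr_info() below applies the same validation to
 * the peer's attributes on our side.
 */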
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	pkt.mtu = ETH_FRAME_LEN;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long) pkt.addr,
	       pkt.ack_freq,
	       (unsigned long long) pkt.mtu);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long) pkt->addr,
	       pkt->ack_freq,
	       (unsigned long long) pkt->mtu);

	pkt->tag.sid = vio_send_sid(vio);

	if (pkt->xfer_mode != VIO_DRING_MODE ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != ETH_FRAME_LEN) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK\n");

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}

static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}
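
/* Receive one packet: validate the length against ETH_ZLEN and
 * ETH_FRAME_LEN, allocate an aligned skb, ldc_copy() the data in from
 * the peer's cookies (rounding the copy length up to a multiple of 8
 * as required above), then strip the VNET_PACKET_SKIP pad and hand
 * the frame to the stack with netif_rx().
 */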
static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx(skb);

	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}
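
/* Send a DRING data ACK for the range [start, end] with the given
 * dring state.  vio_ldc_send() can return -EAGAIN while the LDC
 * channel is congested, so retry with exponential backoff capped at
 * 128us between attempts.
 */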
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident	= dr->ident,
		.start_idx	= start,
		.end_idx	= end,
		.state		= vio_dring_state,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;
	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}
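
/* Walk the RX descriptor ring from 'start' to 'end' (an end_idx of
 * (u32)-1 means "keep going until we hit a descriptor that is not
 * READY").  Descriptors that request it get an ACTIVE ack along the
 * way; a final STOPPED ack tells the transmitter how far we got.
 */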
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);
	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
}

static int vnet_rx(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
		       "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
		return 0;
	}

	dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
}

static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}
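
/* Handle a data ACK from the peer: advance the TX ring consumer past
 * the acked index.  Returns 1 if the queue is stopped and enough ring
 * entries are now free that the caller should wake it, else 0.
 */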
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	if (unlikely(!idx_is_pending(dr, end)))
		return 0;

	dr->cons = next_idx(end, dr);

	vp = port->vp;
	dev = vp->dev;
	if (unlikely(netif_queue_stopped(dev) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
		       "[%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}
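
/* Wake the TX queue only if every port on this vnet has enough free
 * TX ring entries; a single congested port keeps the shared queue
 * stopped.  netif_tx_lock() is taken to serialize against the
 * transmit path.
 */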
static void maybe_tx_wakeup(struct vnet *vp)
{
	struct net_device *dev = vp->dev;

	netif_tx_lock(dev);
	if (likely(netif_queue_stopped(dev))) {
		struct vnet_port *port;
		int wake = 1;

		list_for_each_entry(port, &vp->port_list, list) {
			struct vio_dring_state *dr;

			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
			if (vnet_tx_dring_avail(dr) <
			    VNET_TX_WAKEUP_THRESH(dr)) {
				wake = 0;
				break;
			}
		}
		if (wake)
			netif_wake_queue(dev);
	}
	netif_tx_unlock(dev);
}
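
/* LDC event callback for a port.  RESET/UP events feed the VIO link
 * state machine; DATA_READY drains the channel, dispatching each
 * message by its tag to the data handlers (vnet_rx, vnet_ack,
 * vnet_nack), the control-packet engine, or the unknown-message
 * handler.
 */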
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int tx_wakeup, err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);

		if (event == LDC_EVENT_RESET)
			vio_port_up(vio);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				err = vnet_rx(port, &msgbuf);
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	spin_unlock(&vio->lock);
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port->vp);
	local_irq_restore(flags);
}
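
/* Kick the peer to process new TX descriptors: send a DRING_DATA INFO
 * message starting at our producer index with an open end_idx, using
 * the same capped exponential backoff on -EAGAIN as vnet_send_ack().
 * Called with the port's vio.lock held.
 */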
static int __vnet_tx_trigger(struct vnet_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident	= dr->ident,
		.start_idx	= dr->prod,
		.end_idx	= (u32) -1,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}
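
/* Map a destination MAC address to a port: hash skb->data (which
 * points at the destination address) into port_hash and compare
 * against each port's remote MAC.  If nothing matches, fall back to
 * the first port on the list, which is the switch port when one
 * exists (see the list ordering in vnet_port_probe()).
 */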
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct hlist_node *n;
	struct vnet_port *port;

	hlist_for_each_entry(port, n, hp, hash) {
		if (!compare_ether_addr(port->raddr, skb->data))
			return port;
	}
	port = NULL;
	if (!list_empty(&vp->port_list))
		port = list_entry(vp->port_list.next, struct vnet_port, list);

	return port;
}

struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}
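
/* Transmit path: copy the skb into the pre-mapped TX buffer for the
 * next ring slot (padding short frames out to ETH_ZLEN), fill in the
 * descriptor, and only then mark it VIO_DESC_READY behind a write
 * barrier so the peer never sees a half-built descriptor.  Finally
 * trigger the peer and advance the producer index.
 */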
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
	}

	d->hdr.ack = VIO_ACK_ENABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	err = __vnet_tx_trigger(port);
	if (unlikely(err < 0)) {
		printk(KERN_INFO PFX "%s: TX trigger error %d\n",
		       dev->name, err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (!memcmp(m->addr, addr, ETH_ALEN))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct dev_addr_list *p;

	for (p = dev->mc_list; p; p = p->next) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, p->dmi_addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, p->dmi_addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}
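
/* Push the multicast list to the switch port in VNET_NUM_MCAST-sized
 * batches: first an add pass (set = 1) for entries not yet sent, then
 * a remove pass (set = 0) for entries whose 'hit' flag was not
 * refreshed by __update_mc_list(), freeing those entries as we go.
 */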
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
		}
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}

static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}

static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);
	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};

static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = port->tx_bufs[i].buf;

		if (!buf)
			continue;

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);

		kfree(buf);
		port->tx_bufs[i].buf = NULL;
	}
}
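
/* Allocate and LDC-map one TX buffer per ring entry (failing with
 * -EFAULT if an allocation is not 8-byte aligned), then export the
 * descriptor ring itself, where each entry holds a vio_net_desc plus
 * room for two transfer cookies, to the peer domain via
 * ldc_alloc_exp_dring().
 */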
static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
		int map_len = (ETH_FRAME_LEN + 7) & ~7;

		err = -ENOMEM;
		if (!buf) {
			printk(KERN_ERR "TX buffer allocation failure\n");
			goto err_out;
		}
		err = -EFAULT;
		if ((unsigned long)buf & (8UL - 1)) {
			printk(KERN_ERR "TX buffer misaligned\n");
			kfree(buf);
			goto err_out;
		}

		err = ldc_map_single(port->vio.lp, buf, map_len,
				     port->tx_bufs[i].cookies, 2,
				     (LDC_MAP_SHADOW |
				      LDC_MAP_DIRECT |
				      LDC_MAP_RW));
		if (err < 0) {
			kfree(buf);
			goto err_out;
		}
		port->tx_bufs[i].buf = buf;
		port->tx_bufs[i].ncookies = err;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;

err_out:
	vnet_port_free_tx_bufs(port);
	return err;
}

static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static const struct net_device_ops vnet_ops = {
	.ndo_open		= vnet_open,
	.ndo_stop		= vnet_close,
	.ndo_set_multicast_list	= vnet_set_rx_mode,
	.ndo_set_mac_address	= vnet_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= vnet_tx_timeout,
	.ndo_change_mtu		= vnet_change_mtu,
	.ndo_start_xmit		= vnet_start_xmit,
};

static struct vnet * __devinit vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev(sizeof(*vp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_dev;
	}

	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}

static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}

static const char *local_mac_prop = "local-mac-address";

static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
						u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}

static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};

static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

const char *remote_macaddr_prop = "remote-mac-address";
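
/* Probe a "vnet-port" node from the machine description: find (or
 * create) the parent vnet device by its local-mac-address, read the
 * port's remote MAC, allocate and map the TX resources, then register
 * the port on the vnet's list and hash.  Switch ports are added at the
 * head of the list so they become the fallback in __tx_port_find().
 * Finally bring the LDC channel up.
 */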
static int __devinit vnet_port_probe(struct vio_dev *vdev,
				     const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		printk(KERN_ERR PFX "Port lacks %s property.\n",
		       remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
		goto err_out_put_mdesc;
	}

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add(&port->list, &vp->port_list);
	else
		list_add_tail(&port->list, &vp->port_list);
	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
	       vp->dev->name, port->raddr,
	       switch_port ? " switch-port" : "");

	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}

static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		struct vnet *vp = port->vp;
		unsigned long flags;

		del_timer_sync(&port->vio.timer);

		spin_lock_irqsave(&vp->lock, flags);
		list_del(&port->list);
		hlist_del(&port->hash);
		spin_unlock_irqrestore(&vp->lock, flags);

		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.driver		= {
		.name	= "vnet_port",
		.owner	= THIS_MODULE,
	}
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
}

module_init(vnet_init);
module_exit(vnet_exit);