/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"

#define DRV_MODULE_NAME		"sunvnet"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}
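
/* Send our attribute INFO during the VIO handshake: dring transfer
 * mode, the local MAC address packed big-endian into a single u64,
 * and an MTU of ETH_FRAME_LEN.  handle_attr_info() below NACKs and
 * resets the connection if the peer advertises anything different.
 */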
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	pkt.mtu = ETH_FRAME_LEN;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long) pkt.addr,
	       pkt.ack_freq,
	       (unsigned long long) pkt.mtu);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long) pkt->addr,
	       pkt->ack_freq,
	       (unsigned long long) pkt->mtu);

	pkt->tag.sid = vio_send_sid(vio);

	if (pkt->xfer_mode != VIO_DRING_MODE ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != ETH_FRAME_LEN) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK\n");

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
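
/* Once the handshake finishes, both dring sequence counters restart
 * at 1; every dring data message we send or accept from here on
 * carries the next value of the matching counter.
 */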
static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}
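
/* Receive one packet.  The descriptor gives the true frame length,
 * but per the alignment rules above the copy must cover the
 * VNET_PACKET_SKIP prefix plus padding out to the next 8-byte
 * boundary; the skb is then pulled and trimmed back to the real frame.
 */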
static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx(skb);

	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}
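
/* Send a dring data ACK covering [start, end].  vio_ldc_send() can
 * return -EAGAIN while the LDC transmit queue is full, so retry with
 * a bounded exponential backoff (1us doubling up to 128us) and only
 * advance the send sequence number once the message actually goes out.
 */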
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_ACK,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = end,
		.state = vio_dring_state,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;

	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;

	*needs_ack = desc->hdr.ack;
	return 0;
}
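
/* Walk the peer's descriptors from 'start' up to the entry after
 * 'end'; an end_idx of (u32)-1 means "no known end, walk until a
 * descriptor is not READY".  ACKs are batched: a VIO_DRING_ACTIVE
 * ACK goes out mid-walk whenever a descriptor requests one, and a
 * final VIO_DRING_STOPPED ACK always closes the walk.
 */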
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);

	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
}

static int vnet_rx(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
		       "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
		return 0;
	}

	dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
}

static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}
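
/* Process a TX dring ACK from the peer: advance our consumer index
 * past the acknowledged entry, and return 1 if the queue is stopped
 * and enough ring space has opened up to warrant a wakeup, which the
 * caller turns into a maybe_tx_wakeup().
 */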
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	if (unlikely(!idx_is_pending(dr, end)))
		return 0;

	dr->cons = next_idx(end, dr);

	vp = port->vp;
	dev = vp->dev;
	if (unlikely(netif_queue_stopped(dev) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
		       "[%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

static void maybe_tx_wakeup(struct vnet *vp)
{
	struct net_device *dev = vp->dev;

	netif_tx_lock(dev);
	if (likely(netif_queue_stopped(dev))) {
		struct vnet_port *port;
		int wake = 1;

		list_for_each_entry(port, &vp->port_list, list) {
			struct vio_dring_state *dr;

			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
			if (vnet_tx_dring_avail(dr) <
			    VNET_TX_WAKEUP_THRESH(dr)) {
				wake = 0;
				break;
			}
		}
		if (wake)
			netif_wake_queue(dev);
	}
	netif_tx_unlock(dev);
}
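
/* LDC event callback.  Link state changes are forwarded to the VIO
 * core; DATA_READY drains every queued message and dispatches it as
 * dring data, a multicast reply, or a handshake control packet.
 * Note the unlock sequence at the bottom: the vio lock is dropped
 * before maybe_tx_wakeup() (which takes netif_tx_lock), but
 * interrupts stay disabled until local_irq_restore().
 */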
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int tx_wakeup, err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);

		if (event == LDC_EVENT_RESET)
			vio_port_up(vio);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				err = vnet_rx(port, &msgbuf);
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}

		if (err == -ECONNRESET)
			break;
	}
	spin_unlock(&vio->lock);
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port->vp);
	local_irq_restore(flags);
}
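
/* Kick the peer after a new TX descriptor has been made READY.
 * start_idx points at our producer slot, and an end_idx of (u32)-1
 * tells the receiver to walk forward until it finds a descriptor
 * that is not READY.  Uses the same bounded-backoff retry loop as
 * vnet_send_ack().
 */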
static int __vnet_tx_trigger(struct vnet_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = dr->prod,
		.end_idx = (u32) -1,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}
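
/* Map a destination MAC address to a port.  Hash the address and look
 * for an exact match; if none is found, fall back to the first port
 * on the list, which vnet_port_probe() arranges to be the switch
 * port when one exists.  Caller must hold vp->lock.
 */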
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct hlist_node *n;
	struct vnet_port *port;

	hlist_for_each_entry(port, n, hp, hash) {
		if (!compare_ether_addr(port->raddr, skb->data))
			return port;
	}
	port = NULL;
	if (!list_empty(&vp->port_list))
		port = list_entry(vp->port_list.next, struct vnet_port, list);

	return port;
}

struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}
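
/* Transmit path: copy the frame into the pre-mapped buffer for the
 * current producer slot (offset by VNET_PACKET_SKIP and zero-padded
 * up to ETH_ZLEN), fill in the descriptor, and only then mark it
 * READY behind a write barrier so the peer LDOM never observes a
 * READY descriptor with stale contents.
 */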
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf + VNET_PACKET_SKIP + skb->len, 0, len - skb->len);
	}

	d->hdr.ack = VIO_ACK_ENABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	err = __vnet_tx_trigger(port);
	if (unlikely(err < 0)) {
		printk(KERN_INFO PFX "%s: TX trigger error %d\n",
		       dev->name, err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (!memcmp(m->addr, addr, ETH_ALEN))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct dev_addr_list *p;

	for (p = dev->mc_list; p; p = p->next) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, p->dmi_addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, p->dmi_addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}
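
/* Push the multicast list to the switch port in two passes: first a
 * "set" message for every address not yet sent, then an "unset"
 * message for every entry the latest __update_mc_list() pass did not
 * hit, unlinking and freeing those entries.  Addresses are batched
 * up to VNET_NUM_MCAST per VIO message.
 */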
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
		}
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}

static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}

static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);
	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo = vnet_get_drvinfo,
	.get_msglevel = vnet_get_msglevel,
	.set_msglevel = vnet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = port->tx_bufs[i].buf;

		if (!buf)
			continue;

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);

		kfree(buf);
		port->tx_bufs[i].buf = NULL;
	}
}
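
/* Allocate one pre-mapped, 8-byte-aligned TX buffer per ring entry
 * (rounding the mapped length up to a multiple of 8, per the copy
 * rules above) and export the descriptor ring itself to the peer via
 * ldc_alloc_exp_dring().  Each descriptor carries up to two transfer
 * cookies for its buffer.
 */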
static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
		int map_len = (ETH_FRAME_LEN + 7) & ~7;

		err = -ENOMEM;
		if (!buf) {
			printk(KERN_ERR "TX buffer allocation failure\n");
			goto err_out;
		}
		err = -EFAULT;
		if ((unsigned long)buf & (8UL - 1)) {
			printk(KERN_ERR "TX buffer misaligned\n");
			kfree(buf);
			goto err_out;
		}

		err = ldc_map_single(port->vio.lp, buf, map_len,
				     port->tx_bufs[i].cookies, 2,
				     (LDC_MAP_SHADOW |
				      LDC_MAP_DIRECT |
				      LDC_MAP_RW));
		if (err < 0) {
			kfree(buf);
			goto err_out;
		}
		port->tx_bufs[i].buf = buf;
		port->tx_bufs[i].ncookies = err;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;

err_out:
	vnet_port_free_tx_bufs(port);
	return err;
}

static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static const struct net_device_ops vnet_ops = {
	.ndo_open = vnet_open,
	.ndo_stop = vnet_close,
	.ndo_set_multicast_list = vnet_set_rx_mode,
	.ndo_set_mac_address = vnet_set_mac_addr,
	.ndo_tx_timeout = vnet_tx_timeout,
	.ndo_change_mtu = vnet_change_mtu,
	.ndo_start_xmit = vnet_start_xmit,
};
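
/* Create and register a new vnet device.  The MAC address arrives as
 * a machine-description property packed big-endian into a u64, so it
 * is unpacked byte by byte, highest byte first, into dev->dev_addr.
 */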
static struct vnet * __devinit vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev(sizeof(*vp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_dev;
	}

	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}

static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}

static const char *local_mac_prop = "local-mac-address";
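
/* A vnet-port node hangs off its parent "network" node in the machine
 * description.  Walk the port node's back-arcs to find that parent,
 * and use its local-mac-address property to locate (or create) the
 * vnet device the port belongs to.
 */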
static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
						u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}

static struct ldc_channel_config vnet_ldc_cfg = {
	.event = vnet_event,
	.mtu = 64,
	.mode = LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr = vnet_send_attr,
	.handle_attr = vnet_handle_attr,
	.handshake_complete = vnet_handshake_complete,
};

static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

const char *remote_macaddr_prop = "remote-mac-address";

static int __devinit vnet_port_probe(struct vio_dev *vdev,
				     const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		printk(KERN_ERR PFX "Port lacks %s property.\n",
		       remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
		goto err_out_put_mdesc;
	}

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add(&port->list, &vp->port_list);
	else
		list_add_tail(&port->list, &vp->port_list);
	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
	       vp->dev->name, port->raddr,
	       switch_port ? " switch-port" : "");

	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}

static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		struct vnet *vp = port->vp;
		unsigned long flags;

		del_timer_sync(&port->vio.timer);

		spin_lock_irqsave(&vp->lock, flags);
		list_del(&port->list);
		hlist_del(&port->hash);
		spin_unlock_irqrestore(&vp->lock, flags);

		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table = vnet_port_match,
	.probe = vnet_port_probe,
	.remove = vnet_port_remove,
	.driver = {
		.name = "vnet_port",
		.owner = THIS_MODULE,
	}
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
}

module_init(vnet_init);
module_exit(vnet_exit);