netvsc.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include "hyperv_net.h"
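
/*
 * Lifetime of the per-channel NetVSC state:
 *
 * alloc_net_device() allocates struct netvsc_device and installs it as
 * the hv_device driver data (the struct net_device pointer previously
 * stored there is stashed in net_device->ndev).  The two getters below
 * hand the state back out, gated by the 'destroy' flag so that teardown
 * can shut the traffic paths down in the right order.
 */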

static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;

	hv_set_drvdata(device, net_device);
	return net_device;
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);
	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);
	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

get_in_err:
	return net_device;
}
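
/*
 * Note the asymmetry between the two getters above: the outbound side
 * becomes unusable as soon as 'destroy' is set, while the inbound side
 * stays valid until all outstanding sends have completed.  Teardown
 * relies on this so that send-completion packets arriving from the
 * host can still be processed while the device drains.
 */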

static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type = NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and have a
		 * leak rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);
		/* Again, prefer a leak over continuing toward a bugcheck. */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		free_pages((unsigned long)net_device->recv_buf,
			   get_order(net_device->recv_buf_size));
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	return ret;
}
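
/*
 * Receive buffer setup is a four-step handshake with the host:
 *
 *   1. allocate a physically contiguous buffer of recv_buf_size bytes;
 *   2. share it with the host via vmbus_establish_gpadl();
 *   3. send NVSP_MSG1_TYPE_SEND_RECV_BUF carrying the gpadl handle and
 *      wait for the host's SendReceiveBufferComplete response;
 *   4. copy the section table out of the response.
 *
 * This protocol revision expects exactly one section, starting at
 * offset 0, that covers the entire buffer.
 */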

static int netvsc_init_recv_buf(struct hv_device *device)
{
	int ret = 0;
	int t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	net_device->recv_buf =
		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
					 get_order(net_device->recv_buf_size));
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %d\n",
			   net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	/* Check the response */
	if (init_packet->msg.v1_msg.send_recv_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section =
		kmemdup(init_packet->msg.v1_msg.send_recv_buf_complete.sections,
			net_device->recv_section_cnt *
				sizeof(struct nvsp_1_receive_buffer_section),
			GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents
	 * the entire receive buffer.
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_recv_buf(net_device);

exit:
	return ret;
}
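
/*
 * Connecting to the host-side NetVSP is likewise message driven: an
 * NVSP INIT exchange negotiates the protocol version (only version 1
 * is accepted here), the guest then reports its NDIS version (5.0,
 * encoded as 0x00050000), and finally the big receive buffer is
 * posted.  Only the INIT message requests a completion; the
 * NDIS-version message is fire and forget.
 */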

static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret, t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.negotiated_protocol_ver !=
	    NVSP_PROTOCOL_VERSION_1) {
		ret = -EPROTO;
		goto cleanup;
	}

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	ndis_version = 0x00050000;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;
	unsigned long flags;

	net_device = hv_get_drvdata(device);
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	net_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Wait for all send completions */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		dev_info(&device->device,
			 "waiting for %d requests to complete...\n",
			 atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device().
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here.
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	kfree(net_device);
	return 0;
}
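
/*
 * Send completions come back carrying the trans_id we supplied on the
 * way out.  Channel-setup responses (INIT, SEND_RECV_BUF,
 * SEND_SEND_BUF) are copied into channel_init_pkt so the thread
 * blocked in wait_for_completion_timeout() can inspect them; RNDIS
 * packet completions instead carry the originating hv_netvsc_packet
 * pointer in trans_id, which lets us call back into the layer above.
 */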

static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		nvsc_packet->completion.send.send_completion(
			nvsc_packet->completion.send.send_completion_ctx);

		atomic_dec(&net_device->num_outstanding_sends);
	} else {
		netdev_err(ndev,
			   "unknown send completion packet type %d received\n",
			   nvsp_packet->hdr.msg_type);
	}
}
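
/*
 * Outbound packets are wrapped in an NVSP "send RNDIS packet" message.
 * channel_type 0 marks RMC data, 1 marks RMC control; the 0xFFFFFFFF
 * section index says the payload is not staged through the send
 * buffer.  Packets carrying page buffers go out via the page-buffer
 * variant of the vmbus send, everything else as a plain inband packet.
 */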

int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct nvsp_message send_msg;
	struct net_device *ndev;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	send_msg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA */
		send_msg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL */
		send_msg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Not using send buffer section */
	send_msg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		0xFFFFFFFF;
	send_msg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(device->channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &send_msg,
						  sizeof(struct nvsp_message),
						  (unsigned long)packet);
	} else {
		ret = vmbus_sendpacket(device->channel, &send_msg,
				       sizeof(struct nvsp_message),
				       (unsigned long)packet,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	else
		atomic_inc(&net_device->num_outstanding_sends);

	return ret;
}
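
/*
 * Receive completions go back to the host as VM_PKT_COMP packets
 * reusing the transfer-page transaction id.  The ring buffer can be
 * momentarily full (-EAGAIN), so the send is retried up to three times
 * with a short delay before giving up.
 */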

static void netvsc_send_recv_completion(struct hv_device *device,
					u64 transaction_id)
{
	struct nvsp_message recvcomp_msg;
	int retries = 0;
	int ret;
	struct net_device *ndev;
	struct netvsc_device *net_device = hv_get_drvdata(device);

	ndev = net_device->ndev;

	recvcomp_msg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	/* FIXME: Pass in the status */
	recvcomp_msg.msg.v1_msg.send_rndis_pkt_complete.status =
		NVSP_STAT_SUCCESS;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(device->channel, &recvcomp_msg,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
	} else if (ret == -EAGAIN) {
		/* no more room... wait a bit and retry, up to 3 times */
		retries++;
		netdev_err(ndev,
			   "unable to send receive completion pkt (tid %llx)...retrying %d\n",
			   transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev,
				   "unable to send receive completion pkt (tid %llx)...gave up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev,
			   "unable to send receive completion pkt - %llx\n",
			   transaction_id);
	}
}
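
/*
 * Each transfer-page packet fans out into one hv_netvsc_packet per
 * range, and xfer_page_pkt->count tracks how many of those are still
 * with the RNDIS layer.  The completion below runs once per range;
 * only the last one returns the xferpage packet to the free list and
 * sends the receive completion back to the host.
 */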

/* Send a receive completion packet to RNDIS device (ie NetVsp) */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;
	struct net_device *ndev;

	/*
	 * Even though it seems logical to do a GetOutboundNetDevice() here
	 * to send out the receive completion, we are using
	 * GetInboundNetDevice() since we may have disabled outbound
	 * traffic already.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;

	/* Overloading use of the lock. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	packet->xfer_page_pkt->count--;

	/*
	 * Last one in the line that represents 1 xfer page packet.
	 * Return the xfer page packet itself to the freelist.
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);
	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);
}
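
/*
 * An incoming transfer-page packet describes range_cnt ranges inside
 * the shared receive buffer, each holding one RNDIS packet (one
 * ethernet frame).  We therefore pull range_cnt + 1 packets off the
 * free list: one to stand for the xferpage packet itself, plus one per
 * range.  If fewer than two are available the whole packet is dropped
 * and completed back to the host immediately; if we get some but not
 * all, only that many ranges are delivered upstream.
 */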

static void netvsc_receive(struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;
	struct net_device *ndev;
	LIST_HEAD(pkt_list);

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;

	/*
	 * All inbound packets other than send completion should be xfer
	 * page packets.
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received - %d\n",
			   nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev,
			   "Invalid xfer page set id - expecting %x got %x\n",
			   NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	/*
	 * Grab free packets (range count + 1) to represent this xfer page
	 * packet.  +1 to represent the xfer page packet itself.  We grab
	 * them here so that we know exactly how many we can fulfil.
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &pkt_list);

		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer page and
	 * at least 1 for a range) before we can handle any of the xfer
	 * page packet ranges.
	 */
	if (count < 2) {
		netdev_err(ndev,
			   "Got only %d netvsc pkt...needed %d pkts. Dropping this xfer page packet completely!\n",
			   count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(pkt_list.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);
		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)pkt_list.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		netdev_err(ndev,
			   "Needed %d netvsc pkts to satisfy this xfer page...got %d\n",
			   vmxferpage_packet->range_cnt,
			   xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)pkt_list.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
			netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
			netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
			vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
			vmxferpage_packet->ranges[i].byte_count;

		start = virt_to_phys((void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset
			+ vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/* Handle frame across multiple pages: */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn << PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
				netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
					virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		netvsc_receive_completion(netvsc_packet->
				completion.recv.recv_completion_ctx);
	}
}
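
/*
 * Channel callback: drain the vmbus ring buffer until it is empty.
 * Packets are read raw into a scratch buffer of NETVSC_PACKET_SIZE
 * bytes; when a packet is too large (-ENOBUFS), a bigger buffer is
 * allocated just for it and the scratch buffer is restored afterwards.
 */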

static void netvsc_channel_cb(void *context)
{
	int ret;
	struct hv_device *device = context;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char *packet;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;

	packet = kzalloc(NETVSC_PACKET_SIZE, GFP_ATOMIC);
	if (!packet)
		return;
	buffer = packet;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		goto out;
	ndev = net_device->ndev;

	do {
		ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(device, desc);
					break;

				default:
					netdev_err(ndev,
						   "unhandled packet type %d, tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}

				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}
			} else {
				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}

				break;
			}
		} else if (ret == -ENOBUFS) {
			/* Handle large packet */
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size (%d)!\n",
					   bytes_recvd);
				buffer = packet;
				bufferlen = NETVSC_PACKET_SIZE;
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

out:
	if (buffer != packet)
		kfree(buffer);
	kfree(packet);
}
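
/*
 * Setup order matters in netvsc_device_add() below: the receive packet
 * free list and the channel_init completion are initialized before
 * vmbus_open(), because the channel callback can start running as soon
 * as the channel is open, and netvsc_connect_vsp() then depends on both.
 */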

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *packet, *pos;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Initialize the NetVSC channel extension */
	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	spin_lock_init(&net_device->recv_pkt_list_lock);

	INIT_LIST_HEAD(&net_device->recv_pkt_list);

	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
		packet = kzalloc(sizeof(struct hv_netvsc_packet) +
				 (NETVSC_RECEIVE_SG_COUNT *
				  sizeof(struct hv_page_buffer)), GFP_KERNEL);
		if (!packet)
			break;

		list_add_tail(&packet->list_ent,
			      &net_device->recv_pkt_list);
	}
	init_completion(&net_device->channel_init_wait);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device);
	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	if (net_device) {
		list_for_each_entry_safe(packet, pos,
					 &net_device->recv_pkt_list,
					 list_ent) {
			list_del(&packet->list_ent);
			kfree(packet);
		}

		kfree(net_device);
	}

	return ret;
}