/* vnic_dev.c */
  1. /*
  2. * Copyright 2008 Cisco Systems, Inc. All rights reserved.
  3. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  4. *
  5. * This program is free software; you may redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; version 2 of the License.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. * SOFTWARE.
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/errno.h>
  20. #include <linux/types.h>
  21. #include <linux/pci.h>
  22. #include <linux/delay.h>
  23. #include <linux/if_ether.h>
  24. #include "vnic_resource.h"
  25. #include "vnic_devcmd.h"
  26. #include "vnic_dev.h"
  27. #include "vnic_stats.h"
/* One mapped BAR resource: its ioremapped address and element count. */
struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};
/* Per-vNIC device state: discovered BAR resources, the devcmd register
 * window, and the DMA-coherent buffers shared with firmware.
 */
struct vnic_dev {
	void *priv;				/* opaque context owned by the caller */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* filled by vnic_dev_discover_res() */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* firmware command registers */
	struct vnic_devcmd_notify *notify;	/* DMA notify area written by fw */
	struct vnic_devcmd_notify notify_copy;	/* last consistent snapshot */
	dma_addr_t notify_pa;
	u32 *linkstatus;			/* optional DMA link-state word */
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* DMA buffer for CMD_STATS_DUMP */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached CMD_MCPU_FW_INFO result */
	dma_addr_t fw_info_pa;
};
/* Worst-case size of the resource header plus one entry per resource type;
 * BAR0 must be at least this large for discovery to proceed.
 */
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
/* Register block stride for the per-index resources (WQ/RQ/CQ/INTR). */
#define VNIC_RES_STRIDE	128
/* Return the caller-private context stored in the vnic_dev. */
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
  56. static int vnic_dev_discover_res(struct vnic_dev *vdev,
  57. struct vnic_dev_bar *bar)
  58. {
  59. struct vnic_resource_header __iomem *rh;
  60. struct vnic_resource __iomem *r;
  61. u8 type;
  62. if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
  63. printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
  64. return -EINVAL;
  65. }
  66. rh = bar->vaddr;
  67. if (!rh) {
  68. printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
  69. return -EINVAL;
  70. }
  71. if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
  72. ioread32(&rh->version) != VNIC_RES_VERSION) {
  73. printk(KERN_ERR "vNIC BAR0 res magic/version error "
  74. "exp (%lx/%lx) curr (%x/%x)\n",
  75. VNIC_RES_MAGIC, VNIC_RES_VERSION,
  76. ioread32(&rh->magic), ioread32(&rh->version));
  77. return -EINVAL;
  78. }
  79. r = (struct vnic_resource __iomem *)(rh + 1);
  80. while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
  81. u8 bar_num = ioread8(&r->bar);
  82. u32 bar_offset = ioread32(&r->bar_offset);
  83. u32 count = ioread32(&r->count);
  84. u32 len;
  85. r++;
  86. if (bar_num != 0) /* only mapping in BAR0 resources */
  87. continue;
  88. switch (type) {
  89. case RES_TYPE_WQ:
  90. case RES_TYPE_RQ:
  91. case RES_TYPE_CQ:
  92. case RES_TYPE_INTR_CTRL:
  93. /* each count is stride bytes long */
  94. len = count * VNIC_RES_STRIDE;
  95. if (len + bar_offset > bar->len) {
  96. printk(KERN_ERR "vNIC BAR0 resource %d "
  97. "out-of-bounds, offset 0x%x + "
  98. "size 0x%x > bar len 0x%lx\n",
  99. type, bar_offset,
  100. len,
  101. bar->len);
  102. return -EINVAL;
  103. }
  104. break;
  105. case RES_TYPE_INTR_PBA_LEGACY:
  106. case RES_TYPE_DEVCMD:
  107. len = count;
  108. break;
  109. default:
  110. continue;
  111. }
  112. vdev->res[type].count = count;
  113. vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
  114. }
  115. return 0;
  116. }
/* Number of instances of a resource type discovered in BAR0. */
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
  122. void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
  123. unsigned int index)
  124. {
  125. if (!vdev->res[type].vaddr)
  126. return NULL;
  127. switch (type) {
  128. case RES_TYPE_WQ:
  129. case RES_TYPE_RQ:
  130. case RES_TYPE_CQ:
  131. case RES_TYPE_INTR_CTRL:
  132. return (char __iomem *)vdev->res[type].vaddr +
  133. index * VNIC_RES_STRIDE;
  134. default:
  135. return (char __iomem *)vdev->res[type].vaddr;
  136. }
  137. }
  138. unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
  139. unsigned int desc_count,
  140. unsigned int desc_size)
  141. {
  142. /* The base address of the desc rings must be 512 byte aligned.
  143. * Descriptor count is aligned to groups of 32 descriptors. A
  144. * count of 0 means the maximum 4096 descriptors. Descriptor
  145. * size is aligned to 16 bytes.
  146. */
  147. unsigned int count_align = 32;
  148. unsigned int desc_align = 16;
  149. ring->base_align = 512;
  150. if (desc_count == 0)
  151. desc_count = 4096;
  152. ring->desc_count = ALIGN(desc_count, count_align);
  153. ring->desc_size = ALIGN(desc_size, desc_align);
  154. ring->size = ring->desc_count * ring->desc_size;
  155. ring->size_unaligned = ring->size + ring->base_align;
  156. return ring->size_unaligned;
  157. }
/* Zero the (aligned) descriptor area of the ring. */
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
/* Allocate a DMA-coherent descriptor ring. Over-allocates by base_align
 * bytes, then rounds the base address up so the hardware sees a
 * properly aligned ring. Returns 0 or -ENOMEM.
 */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* Fills in ring->size_unaligned etc. before the allocation. */
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);
	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	/* Align the DMA address, then offset the CPU pointer identically. */
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);
	vnic_dev_clear_desc_ring(ring);
	/* One descriptor is kept unused to distinguish full from empty. */
	ring->desc_avail = ring->desc_count - 1;
	return 0;
}
  183. void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
  184. {
  185. if (ring->descs) {
  186. pci_free_consistent(vdev->pdev,
  187. ring->size_unaligned,
  188. ring->descs_unaligned,
  189. ring->base_addr_unaligned);
  190. ring->descs = NULL;
  191. }
  192. }
  193. int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
  194. u64 *a0, u64 *a1, int wait)
  195. {
  196. struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
  197. int delay;
  198. u32 status;
  199. int dev_cmd_err[] = {
  200. /* convert from fw's version of error.h to host's version */
  201. 0, /* ERR_SUCCESS */
  202. EINVAL, /* ERR_EINVAL */
  203. EFAULT, /* ERR_EFAULT */
  204. EPERM, /* ERR_EPERM */
  205. EBUSY, /* ERR_EBUSY */
  206. };
  207. int err;
  208. status = ioread32(&devcmd->status);
  209. if (status & STAT_BUSY) {
  210. printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
  211. return -EBUSY;
  212. }
  213. if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
  214. writeq(*a0, &devcmd->args[0]);
  215. writeq(*a1, &devcmd->args[1]);
  216. wmb();
  217. }
  218. iowrite32(cmd, &devcmd->cmd);
  219. if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
  220. return 0;
  221. for (delay = 0; delay < wait; delay++) {
  222. udelay(100);
  223. status = ioread32(&devcmd->status);
  224. if (!(status & STAT_BUSY)) {
  225. if (status & STAT_ERROR) {
  226. err = dev_cmd_err[(int)readq(&devcmd->args[0])];
  227. printk(KERN_ERR "Error %d devcmd %d\n",
  228. err, _CMD_N(cmd));
  229. return -err;
  230. }
  231. if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
  232. rmb();
  233. *a0 = readq(&devcmd->args[0]);
  234. *a1 = readq(&devcmd->args[1]);
  235. }
  236. return 0;
  237. }
  238. }
  239. printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
  240. return -ETIMEDOUT;
  241. }
  242. int vnic_dev_fw_info(struct vnic_dev *vdev,
  243. struct vnic_devcmd_fw_info **fw_info)
  244. {
  245. u64 a0, a1 = 0;
  246. int wait = 1000;
  247. int err = 0;
  248. if (!vdev->fw_info) {
  249. vdev->fw_info = pci_alloc_consistent(vdev->pdev,
  250. sizeof(struct vnic_devcmd_fw_info),
  251. &vdev->fw_info_pa);
  252. if (!vdev->fw_info)
  253. return -ENOMEM;
  254. a0 = vdev->fw_info_pa;
  255. /* only get fw_info once and cache it */
  256. err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
  257. }
  258. *fw_info = vdev->fw_info;
  259. return err;
  260. }
  261. int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
  262. void *value)
  263. {
  264. u64 a0, a1;
  265. int wait = 1000;
  266. int err;
  267. a0 = offset;
  268. a1 = size;
  269. err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
  270. switch (size) {
  271. case 1:
  272. *(u8 *)value = (u8)a0;
  273. break;
  274. case 2:
  275. *(u16 *)value = (u16)a0;
  276. break;
  277. case 4:
  278. *(u32 *)value = (u32)a0;
  279. break;
  280. case 8:
  281. *(u64 *)value = a0;
  282. break;
  283. default:
  284. BUG();
  285. break;
  286. }
  287. return err;
  288. }
/* Ask firmware to zero the device statistics counters. */
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
  295. int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
  296. {
  297. u64 a0, a1;
  298. int wait = 1000;
  299. if (!vdev->stats) {
  300. vdev->stats = pci_alloc_consistent(vdev->pdev,
  301. sizeof(struct vnic_stats), &vdev->stats_pa);
  302. if (!vdev->stats)
  303. return -ENOMEM;
  304. }
  305. *stats = vdev->stats;
  306. a0 = vdev->stats_pa;
  307. a1 = sizeof(struct vnic_stats);
  308. return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
  309. }
/* Ask firmware to close the vNIC (reverse of CMD_OPEN). */
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
/* Enable the vNIC datapath in firmware. */
int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
/* Disable the vNIC datapath in firmware. */
int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
/* Start opening the vNIC; completion is polled via vnic_dev_open_done(). */
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
/* Poll for CMD_OPEN completion. *done is set to 1 when firmware reports
 * the open has finished (status word a0 == 0), else 0.
 */
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	*done = 0;
	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;
	*done = (a0 == 0);
	return 0;
}
/* Start a soft reset; completion polled via vnic_dev_soft_reset_done(). */
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
/* Poll for soft-reset completion; *done = 1 when a0 == 0, else 0. */
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	*done = 0;
	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;
	*done = (a0 == 0);
	return 0;
}
  364. int vnic_dev_hang_notify(struct vnic_dev *vdev)
  365. {
  366. u64 a0, a1;
  367. int wait = 1000;
  368. return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
  369. }
  370. int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
  371. {
  372. u64 a0, a1;
  373. int wait = 1000;
  374. int err, i;
  375. for (i = 0; i < ETH_ALEN; i++)
  376. mac_addr[i] = 0;
  377. err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
  378. if (err)
  379. return err;
  380. for (i = 0; i < ETH_ALEN; i++)
  381. mac_addr[i] = ((u8 *)&a0)[i];
  382. return 0;
  383. }
  384. void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
  385. int broadcast, int promisc, int allmulti)
  386. {
  387. u64 a0, a1 = 0;
  388. int wait = 1000;
  389. int err;
  390. a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
  391. (multicast ? CMD_PFILTER_MULTICAST : 0) |
  392. (broadcast ? CMD_PFILTER_BROADCAST : 0) |
  393. (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
  394. (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
  395. err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
  396. if (err)
  397. printk(KERN_ERR "Can't set packet filter\n");
  398. }
  399. void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
  400. {
  401. u64 a0 = 0, a1 = 0;
  402. int wait = 1000;
  403. int err;
  404. int i;
  405. for (i = 0; i < ETH_ALEN; i++)
  406. ((u8 *)&a0)[i] = addr[i];
  407. err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
  408. if (err)
  409. printk(KERN_ERR
  410. "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
  411. addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
  412. err);
  413. }
  414. void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
  415. {
  416. u64 a0 = 0, a1 = 0;
  417. int wait = 1000;
  418. int err;
  419. int i;
  420. for (i = 0; i < ETH_ALEN; i++)
  421. ((u8 *)&a0)[i] = addr[i];
  422. err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
  423. if (err)
  424. printk(KERN_ERR
  425. "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
  426. addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
  427. err);
  428. }
  429. int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
  430. {
  431. u64 a0, a1;
  432. int wait = 1000;
  433. if (!vdev->notify) {
  434. vdev->notify = pci_alloc_consistent(vdev->pdev,
  435. sizeof(struct vnic_devcmd_notify),
  436. &vdev->notify_pa);
  437. if (!vdev->notify)
  438. return -ENOMEM;
  439. }
  440. a0 = vdev->notify_pa;
  441. a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
  442. a1 += sizeof(struct vnic_devcmd_notify);
  443. return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
  444. }
/* Unregister the notify area: paddr 0 disables updates and an intr
 * field of all-ones unhooks the interrupt.
 */
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);
	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
/* Snapshot the firmware-written notify area into notify_copy.
 * words[0] holds the sum of the remaining words; re-copy until the
 * checksum matches so we never consume a torn (mid-update) snapshot.
 * Returns 1 when notify_copy is valid, 0 if notify is not set up.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;
	if (!vdev->notify)
		return 0;
	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);
	return 1;
}
/* Issue device initialization with the caller-supplied argument. */
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
/* Current link state: prefer the dedicated DMA linkstatus word when
 * present, otherwise fall back to the notify-area snapshot (0 if the
 * notify area is not set up).
 */
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;
	if (!vnic_dev_notify_ready(vdev))
		return 0;
	return vdev->notify_copy.link_state;
}
/* Port speed from the notify snapshot; 0 if notify is not set up. */
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;
	return vdev->notify_copy.port_speed;
}
/* Message level from the notify snapshot; 0 if notify is not set up. */
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;
	return vdev->notify_copy.msglvl;
}
/* MTU from the notify snapshot; 0 if notify is not set up. */
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;
	return vdev->notify_copy.mtu;
}
/* Link-down count from the notify snapshot; 0 if notify not set up. */
u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;
	return vdev->notify_copy.link_down_cnt;
}
/* Record the interrupt mode (INTx/MSI/MSI-X) chosen by the caller. */
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
/* Return the interrupt mode previously set with vnic_dev_set_intr_mode(). */
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
  520. void vnic_dev_unregister(struct vnic_dev *vdev)
  521. {
  522. if (vdev) {
  523. if (vdev->notify)
  524. pci_free_consistent(vdev->pdev,
  525. sizeof(struct vnic_devcmd_notify),
  526. vdev->notify,
  527. vdev->notify_pa);
  528. if (vdev->linkstatus)
  529. pci_free_consistent(vdev->pdev,
  530. sizeof(u32),
  531. vdev->linkstatus,
  532. vdev->linkstatus_pa);
  533. if (vdev->stats)
  534. pci_free_consistent(vdev->pdev,
  535. sizeof(struct vnic_dev),
  536. vdev->stats, vdev->stats_pa);
  537. if (vdev->fw_info)
  538. pci_free_consistent(vdev->pdev,
  539. sizeof(struct vnic_devcmd_fw_info),
  540. vdev->fw_info, vdev->fw_info_pa);
  541. kfree(vdev);
  542. }
  543. }
  544. struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
  545. void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
  546. {
  547. if (!vdev) {
  548. vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
  549. if (!vdev)
  550. return NULL;
  551. }
  552. vdev->priv = priv;
  553. vdev->pdev = pdev;
  554. if (vnic_dev_discover_res(vdev, bar))
  555. goto err_out;
  556. vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
  557. if (!vdev->devcmd)
  558. goto err_out;
  559. return vdev;
  560. err_out:
  561. vnic_dev_unregister(vdev);
  562. return NULL;
  563. }