/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>		/* for kzalloc()/kfree() below */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
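
/* Resource discovery: BAR0 begins with a vnic_resource_header (magic and
 * version words), followed by an array of vnic_resource entries terminated
 * by RES_TYPE_EOL.  Each entry gives a resource type, the BAR it lives in,
 * an offset into that BAR, and a count.  The walk below records a mapped
 * address and count per resource type; only BAR0-resident resources are
 * kept.
 */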
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);
	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
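
/* Worked example: for a requested ring of 100 descriptors of 16 bytes
 * each, desc_count rounds up to 128 and desc_size stays at 16, so
 * ring->size is 128 * 16 = 2048 bytes and size_unaligned is
 * 2048 + 512 = 2560 bytes.  The extra base_align bytes give
 * vnic_dev_alloc_desc_ring() room to slide the ring start up to the
 * next 512-byte boundary.
 */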
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
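
/* The over-allocation above is what makes the alignment guarantee hold
 * without needing an aligned allocator: even if the bus address came
 * back as misaligned as 0x1040, base_addr would round up to 0x1200 and
 * descs would be advanced by the same 0x1c0 bytes, leaving ring->size
 * usable bytes starting on a 512-byte boundary.
 */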
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				int fw_err = (int)readq(&devcmd->args[0]);

				/* don't index past the translation table
				 * on an unexpected fw error code
				 */
				if (fw_err < 0 ||
				    fw_err >= (int)ARRAY_SIZE(dev_cmd_err))
					err = EINVAL;
				else
					err = dev_cmd_err[fw_err];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
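
/* Illustrative devcmd call (a sketch of the pattern the wrappers below
 * all follow): write-direction arguments go in by value through a0/a1,
 * read-direction results come back through the same pointers, and wait
 * bounds the busy-poll at wait * 100 usec:
 *
 *	u64 a0 = 0, a1 = 0;
 *	int err = vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, 1000);
 */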
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}
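
/* Illustrative vnic_dev_spec() use (the offset here is hypothetical;
 * real offsets come from the device's config space layout):
 *
 *	u32 val;
 *	int err = vnic_dev_spec(vdev, 0x8, sizeof(val), &val);
 */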
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
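
/* Note the allocate-once pattern shared with fw_info and the notify
 * buffer: the DMA-coherent stats buffer is allocated on first use,
 * cached in struct vnic_dev, and only freed in vnic_dev_unregister().
 */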
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
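
/* CMD_OPEN completes asynchronously; a caller would typically issue it
 * and then poll for completion, e.g. (illustrative sketch, delay
 * between polls elided):
 *
 *	int done = 0;
 *	err = vnic_dev_open(vdev, 0);
 *	while (!err && !done)
 *		err = vnic_dev_open_done(vdev, &done);
 */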
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;	/* initialized: the command takes no args */
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR
			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR
			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);
}
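
/* CMD_NOTIFY packs its arguments as: a0 = bus address of the notify
 * buffer (0 to tear it down), a1 = interrupt number in bits 47:32
 * (0xffff meaning "no interrupt") plus the buffer size in the low bits.
 * vnic_dev_notify_set() and vnic_dev_notify_unset() below build both
 * encodings.
 */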
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
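
/* The device updates the notify buffer asynchronously, with word 0
 * holding the sum of words 1..N-1.  Re-copying until the checksum
 * matches ensures vnic_dev_notify_ready() leaves a self-consistent
 * snapshot in notify_copy for the accessors further below.
 */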
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
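
/* Typical lifecycle from a PCI probe path (illustrative sketch; the
 * BAR mapping and the priv pointer are placeholders, not part of this
 * file):
 *
 *	struct vnic_dev_bar bar;
 *	struct vnic_dev *vdev;
 *
 *	bar.vaddr = pci_iomap(pdev, 0, 0);
 *	bar.len = pci_resource_len(pdev, 0);
 *	vdev = vnic_dev_register(NULL, priv, pdev, &bar);
 *	if (!vdev)
 *		goto fail;
 *	...
 *	vnic_dev_unregister(vdev);
 */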