vnic_dev.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003
  1. /*
  2. * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
  3. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  4. *
  5. * This program is free software; you may redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; version 2 of the License.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. * SOFTWARE.
  17. *
  18. */
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/types.h>
  22. #include <linux/pci.h>
  23. #include <linux/delay.h>
  24. #include <linux/if_ether.h>
  25. #include "vnic_resource.h"
  26. #include "vnic_devcmd.h"
  27. #include "vnic_dev.h"
  28. #include "vnic_stats.h"
/* How devcmds are issued: directly to this vnic, or proxied to another
 * vnic identified by PCI bus/devfn (see vnic_dev_cmd_proxy_by_bdf). */
enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
};

/* One mapped hardware resource region discovered from BAR0. */
struct vnic_res {
	void __iomem *vaddr;	/* ioremapped address of the region */
	dma_addr_t bus_addr;	/* bus address of the region */
	unsigned int count;	/* number of entries of this resource type */
};

/* Conversion factors between usecs and hw intr coalesce timer units:
 * hw = usec * mul / div (see vnic_dev_intr_coal_timer_usec_to_hw). */
struct vnic_intr_coal_timer_info {
	u32 mul;
	u32 div;
	u32 max_usec;
};

/* Per-device state for the vNIC device-command (devcmd) interface. */
struct vnic_dev {
	void *priv;				/* opaque, owned by caller */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* discovered BAR resources */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* mapped devcmd registers */
	struct vnic_devcmd_notify *notify;	/* DMA notify area, or NULL */
	struct vnic_devcmd_notify notify_copy;	/* last consistent snapshot */
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* DMA stats area, or NULL */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached fw info, or NULL */
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;			/* target bdf when proxying */
	u64 args[VNIC_DEVCMD_NARGS];		/* devcmd arg scratch area */
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
};

/* Worst case: resource header plus one directory entry per type. */
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
/* WQ/RQ/CQ/INTR control register sets are spaced 128 bytes apart. */
#define VNIC_RES_STRIDE 128
  67. void *vnic_dev_priv(struct vnic_dev *vdev)
  68. {
  69. return vdev->priv;
  70. }
/* Walk the resource directory in BAR0 and record, per resource type, the
 * region's mapped address, bus address, and entry count in vdev->res[].
 * Accepts either the normal vnic header or the mgmt vnic header.
 * Returns 0 on success, -EINVAL on a missing/short/malformed BAR.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	/* Both header layouts live at the very start of BAR0. */
	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
		(ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
			(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error "
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	/* The directory entries immediately follow whichever header matched. */
	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	/* Scan entries until the end-of-list marker. */
	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		/* Silently skip entries that reference BARs we didn't map. */
		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/* Reject entries whose register window would run
			 * past the end of the BAR. */
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			/* Unknown resource types are ignored, not fatal. */
			continue;
		}

		/* If a type appears more than once, the last entry wins. */
		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}
  148. unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
  149. enum vnic_res_type type)
  150. {
  151. return vdev->res[type].count;
  152. }
  153. void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
  154. unsigned int index)
  155. {
  156. if (!vdev->res[type].vaddr)
  157. return NULL;
  158. switch (type) {
  159. case RES_TYPE_WQ:
  160. case RES_TYPE_RQ:
  161. case RES_TYPE_CQ:
  162. case RES_TYPE_INTR_CTRL:
  163. return (char __iomem *)vdev->res[type].vaddr +
  164. index * VNIC_RES_STRIDE;
  165. default:
  166. return (char __iomem *)vdev->res[type].vaddr;
  167. }
  168. }
  169. static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
  170. unsigned int desc_count, unsigned int desc_size)
  171. {
  172. /* The base address of the desc rings must be 512 byte aligned.
  173. * Descriptor count is aligned to groups of 32 descriptors. A
  174. * count of 0 means the maximum 4096 descriptors. Descriptor
  175. * size is aligned to 16 bytes.
  176. */
  177. unsigned int count_align = 32;
  178. unsigned int desc_align = 16;
  179. ring->base_align = 512;
  180. if (desc_count == 0)
  181. desc_count = 4096;
  182. ring->desc_count = ALIGN(desc_count, count_align);
  183. ring->desc_size = ALIGN(desc_size, desc_align);
  184. ring->size = ring->desc_count * ring->desc_size;
  185. ring->size_unaligned = ring->size + ring->base_align;
  186. return ring->size_unaligned;
  187. }
  188. void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
  189. {
  190. memset(ring->descs, 0, ring->size);
  191. }
  192. int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
  193. unsigned int desc_count, unsigned int desc_size)
  194. {
  195. vnic_dev_desc_ring_size(ring, desc_count, desc_size);
  196. ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
  197. ring->size_unaligned,
  198. &ring->base_addr_unaligned);
  199. if (!ring->descs_unaligned) {
  200. pr_err("Failed to allocate ring (size=%d), aborting\n",
  201. (int)ring->size);
  202. return -ENOMEM;
  203. }
  204. ring->base_addr = ALIGN(ring->base_addr_unaligned,
  205. ring->base_align);
  206. ring->descs = (u8 *)ring->descs_unaligned +
  207. (ring->base_addr - ring->base_addr_unaligned);
  208. vnic_dev_clear_desc_ring(ring);
  209. ring->desc_avail = ring->desc_count - 1;
  210. return 0;
  211. }
  212. void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
  213. {
  214. if (ring->descs) {
  215. pci_free_consistent(vdev->pdev,
  216. ring->size_unaligned,
  217. ring->descs_unaligned,
  218. ring->base_addr_unaligned);
  219. ring->descs = NULL;
  220. }
  221. }
/* Issue one devcmd through the memory-mapped devcmd area and, unless the
 * command is marked fire-and-forget, spin (wait iterations of 100us) for
 * completion. Arguments travel through vdev->args in both directions.
 *
 * Returns 0 on success, a positive firmware error code (ERR_E*) on a
 * device-reported error, or a negative errno: -ENODEV if the PCIe device
 * disappeared (all-ones read), -EBUSY if a previous command is pending,
 * -ETIMEDOUT when the device never completes.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		/* Post the args first; wmb() orders them ahead of the
		 * command-register write that kicks off execution. */
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				/* args[0] carries the firmware error code.
				 * CMD_CAPABILITY probes expect unknown-cmd
				 * errors, so don't log those. */
				err = (int)readq(&devcmd->args[0]);
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				/* rmb() orders the busy-bit read above
				 * ahead of the result reads below. */
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
/* Issue a devcmd on behalf of another vnic, wrapped in CMD_PROXY_BY_BDF.
 * Wire format: args[0] = target bdf, args[1] = wrapped command,
 * args[2..3] = the wrapped command's two arguments. On return args[0] is
 * the proxied status, args[1] its error code (or first result), and
 * args[1..2] the results on success.
 */
static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index; /* bdf */
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		/* Don't log expected unknown-cmd errors from capability
		 * probes, mirroring _vnic_dev_cmd. */
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}
  299. static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
  300. enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
  301. {
  302. int err;
  303. vdev->args[0] = *a0;
  304. vdev->args[1] = *a1;
  305. err = _vnic_dev_cmd(vdev, cmd, wait);
  306. *a0 = vdev->args[0];
  307. *a1 = vdev->args[1];
  308. return err;
  309. }
  310. int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
  311. u64 *a0, u64 *a1, int wait)
  312. {
  313. memset(vdev->args, 0, sizeof(vdev->args));
  314. switch (vdev->proxy) {
  315. case PROXY_BY_BDF:
  316. return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
  317. case PROXY_NONE:
  318. default:
  319. return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
  320. }
  321. }
  322. static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
  323. {
  324. u64 a0 = (u32)cmd, a1 = 0;
  325. int wait = 1000;
  326. int err;
  327. err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
  328. return !(err || a0);
  329. }
  330. int vnic_dev_fw_info(struct vnic_dev *vdev,
  331. struct vnic_devcmd_fw_info **fw_info)
  332. {
  333. u64 a0, a1 = 0;
  334. int wait = 1000;
  335. int err = 0;
  336. if (!vdev->fw_info) {
  337. vdev->fw_info = pci_alloc_consistent(vdev->pdev,
  338. sizeof(struct vnic_devcmd_fw_info),
  339. &vdev->fw_info_pa);
  340. if (!vdev->fw_info)
  341. return -ENOMEM;
  342. memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
  343. a0 = vdev->fw_info_pa;
  344. a1 = sizeof(struct vnic_devcmd_fw_info);
  345. /* only get fw_info once and cache it */
  346. err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
  347. if (err == ERR_ECMDUNKNOWN) {
  348. err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
  349. &a0, &a1, wait);
  350. }
  351. }
  352. *fw_info = vdev->fw_info;
  353. return err;
  354. }
  355. int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
  356. void *value)
  357. {
  358. u64 a0, a1;
  359. int wait = 1000;
  360. int err;
  361. a0 = offset;
  362. a1 = size;
  363. err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
  364. switch (size) {
  365. case 1: *(u8 *)value = (u8)a0; break;
  366. case 2: *(u16 *)value = (u16)a0; break;
  367. case 4: *(u32 *)value = (u32)a0; break;
  368. case 8: *(u64 *)value = a0; break;
  369. default: BUG(); break;
  370. }
  371. return err;
  372. }
  373. int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
  374. {
  375. u64 a0, a1;
  376. int wait = 1000;
  377. if (!vdev->stats) {
  378. vdev->stats = pci_alloc_consistent(vdev->pdev,
  379. sizeof(struct vnic_stats), &vdev->stats_pa);
  380. if (!vdev->stats)
  381. return -ENOMEM;
  382. }
  383. *stats = vdev->stats;
  384. a0 = vdev->stats_pa;
  385. a1 = sizeof(struct vnic_stats);
  386. return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
  387. }
  388. int vnic_dev_close(struct vnic_dev *vdev)
  389. {
  390. u64 a0 = 0, a1 = 0;
  391. int wait = 1000;
  392. return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
  393. }
  394. int vnic_dev_enable_wait(struct vnic_dev *vdev)
  395. {
  396. u64 a0 = 0, a1 = 0;
  397. int wait = 1000;
  398. int err;
  399. err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
  400. if (err == ERR_ECMDUNKNOWN)
  401. return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
  402. return err;
  403. }
  404. int vnic_dev_disable(struct vnic_dev *vdev)
  405. {
  406. u64 a0 = 0, a1 = 0;
  407. int wait = 1000;
  408. return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
  409. }
  410. int vnic_dev_open(struct vnic_dev *vdev, int arg)
  411. {
  412. u64 a0 = (u32)arg, a1 = 0;
  413. int wait = 1000;
  414. return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
  415. }
  416. int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
  417. {
  418. u64 a0 = 0, a1 = 0;
  419. int wait = 1000;
  420. int err;
  421. *done = 0;
  422. err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
  423. if (err)
  424. return err;
  425. *done = (a0 == 0);
  426. return 0;
  427. }
  428. static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
  429. {
  430. u64 a0 = (u32)arg, a1 = 0;
  431. int wait = 1000;
  432. return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
  433. }
  434. static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
  435. {
  436. u64 a0 = 0, a1 = 0;
  437. int wait = 1000;
  438. int err;
  439. *done = 0;
  440. err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
  441. if (err)
  442. return err;
  443. *done = (a0 == 0);
  444. return 0;
  445. }
  446. int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
  447. {
  448. u64 a0 = (u32)arg, a1 = 0;
  449. int wait = 1000;
  450. int err;
  451. err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
  452. if (err == ERR_ECMDUNKNOWN) {
  453. err = vnic_dev_soft_reset(vdev, arg);
  454. if (err)
  455. return err;
  456. return vnic_dev_init(vdev, 0);
  457. }
  458. return err;
  459. }
  460. int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
  461. {
  462. u64 a0 = 0, a1 = 0;
  463. int wait = 1000;
  464. int err;
  465. *done = 0;
  466. err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
  467. if (err) {
  468. if (err == ERR_ECMDUNKNOWN)
  469. return vnic_dev_soft_reset_done(vdev, done);
  470. return err;
  471. }
  472. *done = (a0 == 0);
  473. return 0;
  474. }
  475. int vnic_dev_hang_notify(struct vnic_dev *vdev)
  476. {
  477. u64 a0, a1;
  478. int wait = 1000;
  479. return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
  480. }
  481. int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
  482. {
  483. u64 a0, a1;
  484. int wait = 1000;
  485. int err, i;
  486. for (i = 0; i < ETH_ALEN; i++)
  487. mac_addr[i] = 0;
  488. err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
  489. if (err)
  490. return err;
  491. for (i = 0; i < ETH_ALEN; i++)
  492. mac_addr[i] = ((u8 *)&a0)[i];
  493. return 0;
  494. }
  495. int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
  496. int broadcast, int promisc, int allmulti)
  497. {
  498. u64 a0, a1 = 0;
  499. int wait = 1000;
  500. int err;
  501. a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
  502. (multicast ? CMD_PFILTER_MULTICAST : 0) |
  503. (broadcast ? CMD_PFILTER_BROADCAST : 0) |
  504. (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
  505. (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
  506. err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
  507. if (err)
  508. pr_err("Can't set packet filter\n");
  509. return err;
  510. }
  511. int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
  512. {
  513. u64 a0 = 0, a1 = 0;
  514. int wait = 1000;
  515. int err;
  516. int i;
  517. for (i = 0; i < ETH_ALEN; i++)
  518. ((u8 *)&a0)[i] = addr[i];
  519. err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
  520. if (err)
  521. pr_err("Can't add addr [%pM], %d\n", addr, err);
  522. return err;
  523. }
  524. int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
  525. {
  526. u64 a0 = 0, a1 = 0;
  527. int wait = 1000;
  528. int err;
  529. int i;
  530. for (i = 0; i < ETH_ALEN; i++)
  531. ((u8 *)&a0)[i] = addr[i];
  532. err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
  533. if (err)
  534. pr_err("Can't del addr [%pM], %d\n", addr, err);
  535. return err;
  536. }
  537. int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
  538. u8 ig_vlan_rewrite_mode)
  539. {
  540. u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
  541. int wait = 1000;
  542. int err;
  543. err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
  544. if (err == ERR_ECMDUNKNOWN)
  545. return 0;
  546. return err;
  547. }
  548. static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
  549. void *notify_addr, dma_addr_t notify_pa, u16 intr)
  550. {
  551. u64 a0, a1;
  552. int wait = 1000;
  553. int r;
  554. memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
  555. vdev->notify = notify_addr;
  556. vdev->notify_pa = notify_pa;
  557. a0 = (u64)notify_pa;
  558. a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
  559. a1 += sizeof(struct vnic_devcmd_notify);
  560. r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
  561. vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
  562. return r;
  563. }
  564. int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
  565. {
  566. void *notify_addr;
  567. dma_addr_t notify_pa;
  568. if (vdev->notify || vdev->notify_pa) {
  569. pr_err("notify block %p still allocated", vdev->notify);
  570. return -EINVAL;
  571. }
  572. notify_addr = pci_alloc_consistent(vdev->pdev,
  573. sizeof(struct vnic_devcmd_notify),
  574. &notify_pa);
  575. if (!notify_addr)
  576. return -ENOMEM;
  577. return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
  578. }
  579. static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
  580. {
  581. u64 a0, a1;
  582. int wait = 1000;
  583. int err;
  584. a0 = 0; /* paddr = 0 to unset notify buffer */
  585. a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
  586. a1 += sizeof(struct vnic_devcmd_notify);
  587. err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
  588. vdev->notify = NULL;
  589. vdev->notify_pa = 0;
  590. vdev->notify_sz = 0;
  591. return err;
  592. }
  593. int vnic_dev_notify_unset(struct vnic_dev *vdev)
  594. {
  595. if (vdev->notify) {
  596. pci_free_consistent(vdev->pdev,
  597. sizeof(struct vnic_devcmd_notify),
  598. vdev->notify,
  599. vdev->notify_pa);
  600. }
  601. return vnic_dev_notify_unsetcmd(vdev);
  602. }
/* Take a consistent snapshot of the device-written notify area into
 * notify_copy. Word 0 holds a checksum of words [1..nwords); re-copy
 * until the recomputed sum matches, i.e. the device didn't update the
 * area mid-copy. Returns 1 when a snapshot was taken, 0 if notify isn't
 * set up.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
/* Bring the vNIC to the initialized state. Prefers CMD_INIT; falls back
 * to the legacy CMD_INIT_v1 on old firmware, emulating the default-MAC
 * programming CMD_INIT would have done itself.
 */
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			/* CMD_MAC_ADDR leaves the MAC in a0, which then
			 * feeds directly into CMD_ADDR_ADD. */
			vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}
  639. int vnic_dev_deinit(struct vnic_dev *vdev)
  640. {
  641. u64 a0 = 0, a1 = 0;
  642. int wait = 1000;
  643. return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
  644. }
  645. void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
  646. {
  647. /* Default: hardware intr coal timer is in units of 1.5 usecs */
  648. vdev->intr_coal_timer_info.mul = 2;
  649. vdev->intr_coal_timer_info.div = 3;
  650. vdev->intr_coal_timer_info.max_usec =
  651. vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
  652. }
/* Query firmware for the intr coalesce timer conversion factors
 * (args[0]=mul, args[1]=div, args[2]=max usec) and cache them. Falls
 * back to safe defaults when the devcmd is unknown or any factor is 0.
 */
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
		(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		pr_warning("Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
	vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
	vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];

	return err;
}
  674. int vnic_dev_link_status(struct vnic_dev *vdev)
  675. {
  676. if (!vnic_dev_notify_ready(vdev))
  677. return 0;
  678. return vdev->notify_copy.link_state;
  679. }
  680. u32 vnic_dev_port_speed(struct vnic_dev *vdev)
  681. {
  682. if (!vnic_dev_notify_ready(vdev))
  683. return 0;
  684. return vdev->notify_copy.port_speed;
  685. }
  686. u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
  687. {
  688. if (!vnic_dev_notify_ready(vdev))
  689. return 0;
  690. return vdev->notify_copy.msglvl;
  691. }
  692. u32 vnic_dev_mtu(struct vnic_dev *vdev)
  693. {
  694. if (!vnic_dev_notify_ready(vdev))
  695. return 0;
  696. return vdev->notify_copy.mtu;
  697. }
  698. void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
  699. enum vnic_dev_intr_mode intr_mode)
  700. {
  701. vdev->intr_mode = intr_mode;
  702. }
  703. enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
  704. struct vnic_dev *vdev)
  705. {
  706. return vdev->intr_mode;
  707. }
  708. u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
  709. {
  710. return (usec * vdev->intr_coal_timer_info.mul) /
  711. vdev->intr_coal_timer_info.div;
  712. }
  713. u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
  714. {
  715. return (hw_cycles * vdev->intr_coal_timer_info.div) /
  716. vdev->intr_coal_timer_info.mul;
  717. }
  718. u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
  719. {
  720. return vdev->intr_coal_timer_info.max_usec;
  721. }
  722. void vnic_dev_unregister(struct vnic_dev *vdev)
  723. {
  724. if (vdev) {
  725. if (vdev->notify)
  726. pci_free_consistent(vdev->pdev,
  727. sizeof(struct vnic_devcmd_notify),
  728. vdev->notify,
  729. vdev->notify_pa);
  730. if (vdev->stats)
  731. pci_free_consistent(vdev->pdev,
  732. sizeof(struct vnic_stats),
  733. vdev->stats, vdev->stats_pa);
  734. if (vdev->fw_info)
  735. pci_free_consistent(vdev->pdev,
  736. sizeof(struct vnic_devcmd_fw_info),
  737. vdev->fw_info, vdev->fw_info_pa);
  738. kfree(vdev);
  739. }
  740. }
  741. struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
  742. void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
  743. unsigned int num_bars)
  744. {
  745. if (!vdev) {
  746. vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
  747. if (!vdev)
  748. return NULL;
  749. }
  750. vdev->priv = priv;
  751. vdev->pdev = pdev;
  752. if (vnic_dev_discover_res(vdev, bar, num_bars))
  753. goto err_out;
  754. vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
  755. if (!vdev->devcmd)
  756. goto err_out;
  757. return vdev;
  758. err_out:
  759. vnic_dev_unregister(vdev);
  760. return NULL;
  761. }
  762. int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
  763. {
  764. u64 a0, a1 = len;
  765. int wait = 1000;
  766. dma_addr_t prov_pa;
  767. void *prov_buf;
  768. int ret;
  769. prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
  770. if (!prov_buf)
  771. return -ENOMEM;
  772. memcpy(prov_buf, buf, len);
  773. a0 = prov_pa;
  774. ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
  775. pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
  776. return ret;
  777. }
  778. int vnic_dev_enable2(struct vnic_dev *vdev, int active)
  779. {
  780. u64 a0, a1 = 0;
  781. int wait = 1000;
  782. a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
  783. return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
  784. }
  785. static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
  786. int *status)
  787. {
  788. u64 a0 = cmd, a1 = 0;
  789. int wait = 1000;
  790. int ret;
  791. ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
  792. if (!ret)
  793. *status = (int)a0;
  794. return ret;
  795. }
  796. int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
  797. {
  798. return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
  799. }
  800. int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
  801. {
  802. return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
  803. }