vnic_dev.c

/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	u32 mul;
	u32 div;
	u32 max_usec;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
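
/* Worked example (editor's note, illustrative only): for desc_count = 100
 * and desc_size = 16, the count rounds up to 128 and the size stays 16,
 * so ring->size = 128 * 16 = 2048 bytes and ring->size_unaligned =
 * 2048 + 512 = 2560 bytes, leaving slack to realign the base address.
 */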

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
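
/* Usage sketch (editor's note; illustrative, error handling elided):
 *
 *	struct vnic_dev_ring ring;
 *	int err;
 *
 *	err = vnic_dev_alloc_desc_ring(vdev, &ring, 256, 16);
 *	if (!err) {
 *		// ring.descs / ring.base_addr are 512-byte aligned here
 *		vnic_dev_free_desc_ring(vdev, &ring);
 *	}
 */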

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
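
/* Editor's note on the devcmd handshake above: for write-direction
 * commands the staged vdev->args are copied into the devcmd args
 * registers, then the command is posted to the cmd register and the
 * status register is polled every 100 usec, up to 'wait' iterations,
 * until STAT_BUSY clears. An all-ones status means the PCIe device is
 * gone; STAT_ERROR returns the firmware error code via args[0].
 */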

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = _vnic_dev_cmd(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}
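
/* Usage sketch (editor's note; vf_index is a hypothetical caller value):
 * proxy mode brackets devcmds so they run on behalf of another vNIC:
 *
 *	vnic_dev_cmd_proxy_by_index_start(vdev, vf_index);
 *	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
 *	vnic_dev_cmd_proxy_end(vdev);
 */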

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				a0, a1, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}
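
/* Usage sketch (editor's note, illustrative): a0/a1 carry arguments in
 * and results out, mirroring vnic_dev_open_done() below:
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;	// up to 1000 polls of 100 usec each
 *	int err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
 *	if (!err)
 *		done = (a0 == 0);	// a0 returns the completion status
 */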

static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				&a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				&a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;	/* zeroed so uninitialized stack data isn't sent */
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;	/* zeroed so uninitialized stack data isn't sent */
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		pr_err("notify block %p still allocated\n", vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
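
/* Editor's note: firmware maintains words[0] as the sum of the remaining
 * 32-bit words of the notify block, so the do/while above re-copies the
 * block until the snapshot checksums cleanly, i.e. until a copy is not
 * torn by a concurrent device update.
 */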

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		pr_warning("Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}
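
/* Worked example (editor's note): with the defaults mul = 2, div = 3
 * (hardware units of 1.5 usec), usec_to_hw(vdev, 3) = 3 * 2 / 3 = 2
 * hardware cycles, and hw_to_usec(vdev, 0xffff) = 65535 * 3 / 2 = 98302
 * usec, which is what the default max_usec works out to.
 */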

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
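
/* Usage sketch (editor's note; 'enic' and 'bar' are hypothetical caller
 * state, with BAR0 already ioremapped into bar):
 *
 *	vdev = vnic_dev_register(NULL, enic, pdev, &bar, 1);
 *	if (!vdev)
 *		return -ENODEV;
 *	...
 *	vnic_dev_unregister(vdev);
 */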

int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}

int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;	/* zeroed so unused bytes aren't stack garbage */
	int wait = 1000;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = mac_addr[i];

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}