/* qlcnic_minidump.c */

#include "qlcnic.h"
#include "qlcnic_hdr.h"
#include <net/ip.h>

#define QLCNIC_DUMP_WCRB	BIT_0
#define QLCNIC_DUMP_RWCRB	BIT_1
#define QLCNIC_DUMP_ANDCRB	BIT_2
#define QLCNIC_DUMP_ORCRB	BIT_3
#define QLCNIC_DUMP_POLLCRB	BIT_4
#define QLCNIC_DUMP_RD_SAVE	BIT_5
#define QLCNIC_DUMP_WRT_SAVED	BIT_6
#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
#define QLCNIC_DUMP_SKIP	BIT_7

#define QLCNIC_DUMP_MASK_MAX	0xff

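/*
 * Layouts of the minidump template entries parsed below: a common header
 * followed by an opcode-specific region body (see struct qlcnic_dump_entry).
 */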
struct qlcnic_common_entry_hdr {
	u32	type;
	u32	offset;
	u32	cap_size;
	u8	mask;
	u8	rsvd[2];
	u8	flags;
} __packed;

struct __crb {
	u32	addr;
	u8	stride;
	u8	rsvd1[3];
	u32	data_size;
	u32	no_ops;
	u32	rsvd2[4];
} __packed;

struct __ctrl {
	u32	addr;
	u8	stride;
	u8	index_a;
	u16	timeout;
	u32	data_size;
	u32	no_ops;
	u8	opcode;
	u8	index_v;
	u8	shl_val;
	u8	shr_val;
	u32	val1;
	u32	val2;
	u32	val3;
} __packed;

struct __cache {
	u32	addr;
	u16	stride;
	u16	init_tag_val;
	u32	size;
	u32	no_ops;
	u32	ctrl_addr;
	u32	ctrl_val;
	u32	read_addr;
	u8	read_addr_stride;
	u8	read_addr_num;
	u8	rsvd1[2];
} __packed;

struct __ocm {
	u8	rsvd[8];
	u32	size;
	u32	no_ops;
	u8	rsvd1[8];
	u32	read_addr;
	u32	read_addr_stride;
} __packed;

struct __mem {
	u8	rsvd[24];
	u32	addr;
	u32	size;
} __packed;

struct __mux {
	u32	addr;
	u8	rsvd[4];
	u32	size;
	u32	no_ops;
	u32	val;
	u32	val_stride;
	u32	read_addr;
	u8	rsvd2[4];
} __packed;

struct __queue {
	u32	sel_addr;
	u16	stride;
	u8	rsvd[2];
	u32	size;
	u32	no_ops;
	u8	rsvd2[8];
	u32	read_addr;
	u8	read_addr_stride;
	u8	read_addr_cnt;
	u8	rsvd3[2];
} __packed;

struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb	crb;
		struct __cache	cache;
		struct __ocm	ocm;
		struct __mem	mem;
		struct __mux	mux;
		struct __queue	que;
		struct __ctrl	ctrl;
	} region;
} __packed;

enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};

struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};

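/*
 * Indirect CRB register access through BAR0: the upper address bits written
 * to QLCNIC_FW_DUMP_REG1 select the register window, and the low 16 bits of
 * the address are then accessed relative to QLCNIC_FW_DUMP_REG2.
 */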
static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
{
	u32 dest;
	void __iomem *window_reg;

	dest = addr & 0xFFFF0000;
	window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
	writel(dest, window_reg);
	readl(window_reg);

	window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
	*data = readl(window_reg);
}

static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
{
	u32 dest;
	void __iomem *window_reg;

	dest = addr & 0xFFFF0000;
	window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
	writel(dest, window_reg);
	readl(window_reg);

	window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
	writel(data, window_reg);
	readl(window_reg);
}

/* FW dump related functions */
static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 addr, data;
	struct __crb *crb = &entry->region.crb;
	void __iomem *base = adapter->ahw->pci_base0;

	addr = crb->addr;
	for (i = 0; i < crb->no_ops; i++) {
		qlcnic_read_dump_reg(addr, base, &data);
		*buffer++ = cpu_to_le32(addr);
		*buffer++ = cpu_to_le32(data);
		addr += crb->stride;
	}
	return crb->no_ops * 2 * sizeof(u32);
}

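/*
 * Execute the control operations encoded in a CTRL entry: each opcode bit
 * selects a read/write/poll or saved-state operation on the CRB address.
 */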
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
			    struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, k, timeout = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	u32 addr, data;
	u8 no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		k = 0;
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				qlcnic_write_dump_reg(addr, base, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				qlcnic_read_dump_reg(addr, base, &data);
				qlcnic_write_dump_reg(addr, base, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				qlcnic_read_dump_reg(addr, base, &data);
				qlcnic_write_dump_reg(addr, base,
						      data & ctr->val2);
				break;
			case QLCNIC_DUMP_ORCRB:
				qlcnic_read_dump_reg(addr, base, &data);
				qlcnic_write_dump_reg(addr, base,
						      data | ctr->val3);
				break;
			case QLCNIC_DUMP_POLLCRB:
				while (timeout <= ctr->timeout) {
					qlcnic_read_dump_reg(addr, base, &data);
					if ((data & ctr->val2) == ctr->val1)
						break;
					msleep(1);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
						 "Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				qlcnic_read_dump_reg(addr, base, &data);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				if (ctr->index_v)
					data = t_hdr->saved_state[ctr->index_v];
				else
					data = ctr->val1;
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				qlcnic_write_dump_reg(addr, base, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					 "Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}

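/* For each mux selection value, capture the selector and the data read back */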
static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int loop;
	u32 val, data = 0;
	struct __mux *mux = &entry->region.mux;
	void __iomem *base = adapter->ahw->pci_base0;

	val = mux->val;
	for (loop = 0; loop < mux->no_ops; loop++) {
		qlcnic_write_dump_reg(mux->addr, base, val);
		qlcnic_read_dump_reg(mux->read_addr, base, &data);
		*buffer++ = cpu_to_le32(val);
		*buffer++ = cpu_to_le32(data);
		val += mux->val_stride;
	}
	return 2 * mux->no_ops * sizeof(u32);
}

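/* Select each queue id in turn and read read_addr_cnt words per queue */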
static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, loop;
	u32 cnt, addr, data, que_id = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __queue *que = &entry->region.que;

	addr = que->read_addr;
	cnt = que->read_addr_cnt;

	for (loop = 0; loop < que->no_ops; loop++) {
		qlcnic_write_dump_reg(que->sel_addr, base, que_id);
		addr = que->read_addr;
		for (i = 0; i < cnt; i++) {
			qlcnic_read_dump_reg(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += que->read_addr_stride;
		}
		que_id += que->stride;
	}
	return que->no_ops * cnt * sizeof(u32);
}

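/* On-chip memory is mapped in BAR0, so read it directly with readl() */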
static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 data;
	void __iomem *addr;
	struct __ocm *ocm = &entry->region.ocm;

	addr = adapter->ahw->pci_base0 + ocm->read_addr;
	for (i = 0; i < ocm->no_ops; i++) {
		data = readl(addr);
		*buffer++ = cpu_to_le32(data);
		addr += ocm->read_addr_stride;
	}
	return ocm->no_ops * sizeof(u32);
}

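/* Dump the flash/ROM contents, taking the flash semaphore around the reads */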
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	fl_addr = rom->addr;
	size = rom->size / 4;
lock_try:
	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		msleep(10);
		count++;
		goto lock_try;
	}
	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
	for (i = 0; i < size; i++) {
		addr = fl_addr & 0xFFFF0000;
		qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		qlcnic_read_dump_reg(addr, base, &val);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	readl(base + QLCNIC_FLASH_SEM2_ULK);
	return rom->size;
}

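/*
 * Dump L1 cache: program the tag and control registers for each operation,
 * then read out read_addr_num data words.
 */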
static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l1 = &entry->region.cache;

	val = l1->init_tag_val;

	for (i = 0; i < l1->no_ops; i++) {
		qlcnic_write_dump_reg(l1->addr, base, val);
		qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
		addr = l1->read_addr;
		cnt = l1->read_addr_num;
		while (cnt) {
			qlcnic_read_dump_reg(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l1->read_addr_stride;
			cnt--;
		}
		val += l1->stride;
	}
	return l1->no_ops * l1->read_addr_num * sizeof(u32);
}

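/*
 * Dump L2 cache: like the L1 path, but optionally poll the control register
 * (mask and timeout taken from ctrl_val) before reading the data words.
 */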
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		qlcnic_write_dump_reg(l2->addr, base, val);
		if (LSW(l2->ctrl_val))
			qlcnic_write_dump_reg(l2->ctrl_addr, base,
					      LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		do {
			qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
			if (!(data & poll_mask))
				break;
			msleep(1);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			qlcnic_read_dump_reg(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}

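/*
 * Read adapter memory through the MIU test agent, 16 bytes per transaction;
 * the region must be 16-byte aligned and a multiple of 16 bytes in size.
 */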
static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
			      struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	u32 addr, data, test, ret = 0;
	int i, reg_read;
	struct __mem *mem = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	reg_read = mem->size;
	addr = mem->addr;
	/* check for data size of multiple of 16 and 16 byte alignment */
	if ((addr & 0xf) || (reg_read % 16)) {
		dev_info(&adapter->pdev->dev,
			 "Unaligned memory addr:0x%x size:0x%x\n",
			 addr, reg_read);
		return -EINVAL;
	}

	mutex_lock(&adapter->ahw->mem_lock);

	while (reg_read != 0) {
		qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
		qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
		qlcnic_write_dump_reg(MIU_TEST_CTR, base,
				      TA_CTL_ENABLE | TA_CTL_START);

		for (i = 0; i < MAX_CTL_CHECK; i++) {
			qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
			if (!(test & TA_CTL_BUSY))
				break;
		}
		if (i == MAX_CTL_CHECK) {
			if (printk_ratelimit()) {
				dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
				ret = -EINVAL;
				goto out;
			}
		}
		for (i = 0; i < 4; i++) {
			qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
					     &data);
			*buffer++ = cpu_to_le32(data);
		}
		addr += 16;
		reg_read -= 16;
		ret += 16;
	}
out:
	mutex_unlock(&adapter->ahw->mem_lock);
	return mem->size;
}

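/* No-op handler: mark the entry as skipped and capture nothing */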
static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}

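/* Opcode-to-handler dispatch table used while walking the dump template */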
static const struct qlcnic_dump_operations fw_dump_ops[] = {
	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
};

/* Walk the template and collect dump for each entry in the dump template */
/* Sanity check: the bytes captured must match the size given in the entry header */
static int
qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
			u32 size)
{
	int ret = 1;

	if (size != entry->hdr.cap_size) {
		dev_info(dev,
			 "Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
			 entry->hdr.type, entry->hdr.mask, size,
			 entry->hdr.cap_size);
		dev_info(dev, "Aborting further dump capture\n");
		ret = 0;
	}
	return ret;
}

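/*
 * Capture the firmware dump: size the buffer from the driver capture mask,
 * walk the template invoking the handler for each enabled entry, then send
 * a udev event to announce that dump data is available.
 */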
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	__le32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data) {
		dev_info(&adapter->pdev->dev,
			 "Unable to allocate (%d KB) for fw dump\n",
			 dump_size / 1024);
		return -ENOMEM;
	}
	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	ops_cnt = ARRAY_SIZE(fw_dump_ops);
	entry_offset = tmpl_hdr->offset;
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}
		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}
		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				 "Invalid entry type %d, exiting dump\n",
				 entry->hdr.type);
			goto error;
		}
		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
						     dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}
	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			 "Captured(%d) and expected size(%d) do not match\n",
			 buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			 adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
			 fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	vfree(fw_dump->data);
	return -EINVAL;
}