qlcnic_minidump.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243
  1. /*
  2. * QLogic qlcnic NIC Driver
  3. * Copyright (c) 2009-2013 QLogic Corporation
  4. *
  5. * See LICENSE.qlcnic for copyright and licensing details.
  6. */
  7. #include "qlcnic.h"
  8. #include "qlcnic_hdr.h"
  9. #include "qlcnic_83xx_hw.h"
  10. #include "qlcnic_hw.h"
  11. #include <net/ip.h>
/* Flash region holding the 83xx minidump template */
#define QLC_83XX_MINIDUMP_FLASH		0x520000
/* Indices into the template header's saved_state[] array
 * (DMA engine index is used below; OCM/PCI usage is elsewhere -- TODO confirm)
 */
#define QLC_83XX_OCM_INDEX		3
#define QLC_83XX_PCI_INDEX		0
#define QLC_83XX_DMA_ENGINE_INDEX	8

/* CRB addresses of the four 32-bit data registers read back from the
 * memory-side (MS) test agent after each 16-byte transfer.
 */
static const u32 qlcnic_ms_read_data[] = {
	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};

/* Sub-operation select bits for a READ_CTRL entry (__ctrl.opcode) */
#define QLCNIC_DUMP_WCRB	BIT_0
#define QLCNIC_DUMP_RWCRB	BIT_1
#define QLCNIC_DUMP_ANDCRB	BIT_2
#define QLCNIC_DUMP_ORCRB	BIT_3
#define QLCNIC_DUMP_POLLCRB	BIT_4
#define QLCNIC_DUMP_RD_SAVE	BIT_5
#define QLCNIC_DUMP_WRT_SAVED	BIT_6
#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
/* Entry-header flag: capture was skipped (shares BIT_7 with MOD_SAVE_ST,
 * but lives in a different field)
 */
#define QLCNIC_DUMP_SKIP	BIT_7

#define QLCNIC_DUMP_MASK_MAX	0xff
/* Descriptor written into MS memory to program one PEX DMA transfer;
 * layout is consumed by hardware, hence __packed.
 */
struct qlcnic_pex_dma_descriptor {
	u32 read_data_size;
	u32 dma_desc_cmd;
	u32 src_addr_low;
	u32 src_addr_high;
	u32 dma_bus_addr_low;
	u32 dma_bus_addr_high;
	u32 rsvd[6];
} __packed;

/* Header common to every minidump template entry */
struct qlcnic_common_entry_hdr {
	u32 type;	/* enum qlcnic_minidump_opcode */
	u32 offset;
	u32 cap_size;	/* expected number of captured bytes */
	u8 mask;
	u8 rsvd[2];
	u8 flags;	/* QLCNIC_DUMP_SKIP set when entry is not captured */
} __packed;

/* READ_CRB entry: dump no_ops (addr, value) pairs starting at addr */
struct __crb {
	u32 addr;
	u8 stride;
	u8 rsvd1[3];
	u32 data_size;
	u32 no_ops;
	u32 rsvd2[4];
} __packed;

/* READ_CTRL entry: register sequencing ops, see qlcnic_dump_ctrl() */
struct __ctrl {
	u32 addr;
	u8 stride;
	u8 index_a;	/* saved_state index supplying the address */
	u16 timeout;	/* poll budget in ~1 ms steps */
	u32 data_size;
	u32 no_ops;
	u8 opcode;	/* bit mask of QLCNIC_DUMP_* sub-operations */
	u8 index_v;	/* saved_state index supplying/receiving the value */
	u8 shl_val;
	u8 shr_val;
	u32 val1;
	u32 val2;
	u32 val3;
} __packed;

/* L1/L2 cache dump entry, see qlcnic_dump_l1_cache()/qlcnic_dump_l2_cache() */
struct __cache {
	u32 addr;	/* tag register */
	u16 stride;
	u16 init_tag_val;
	u32 size;
	u32 no_ops;
	u32 ctrl_addr;
	u32 ctrl_val;
	u32 read_addr;
	u8 read_addr_stride;
	u8 read_addr_num;
	u8 rsvd1[2];
} __packed;

/* On-chip memory dump entry: linear readl() walk through BAR0 */
struct __ocm {
	u8 rsvd[8];
	u32 size;
	u32 no_ops;
	u8 rsvd1[8];
	u32 read_addr;
	u32 read_addr_stride;
} __packed;

/* Memory/ROM dump entry (also carries the PEX DMA programming fields) */
struct __mem {
	u32 desc_card_addr;	/* MS address for the DMA descriptor */
	u32 dma_desc_cmd;
	u32 start_dma_cmd;
	u32 rsvd[3];
	u32 addr;
	u32 size;
} __packed;

/* READ_MUX entry: write select value, read back data register */
struct __mux {
	u32 addr;
	u8 rsvd[4];
	u32 size;
	u32 no_ops;
	u32 val;
	u32 val_stride;
	u32 read_addr;
	u8 rsvd2[4];
} __packed;

/* QUEUE entry: per-queue register block dump */
struct __queue {
	u32 sel_addr;
	u16 stride;
	u8 rsvd[2];
	u32 size;
	u32 no_ops;
	u8 rsvd2[8];
	u32 read_addr;
	u8 read_addr_stride;
	u8 read_addr_cnt;
	u8 rsvd3[2];
} __packed;

/* POLL_RD entry: select, poll for ready, then read */
struct __pollrd {
	u32 sel_addr;
	u32 read_addr;
	u32 sel_val;
	u16 sel_val_stride;
	u16 no_ops;
	u32 poll_wait;	/* maximum number of poll iterations */
	u32 poll_mask;
	u32 data_size;
	u8 rsvd[4];
} __packed;

/* MUX2 entry: two interleaved mux selects per operation */
struct __mux2 {
	u32 sel_addr1;
	u32 sel_addr2;
	u32 sel_val1;
	u32 sel_val2;
	u32 no_ops;
	u32 sel_val_mask;
	u32 read_addr;
	u8 sel_val_stride;
	u8 data_size;
	u8 rsvd[2];
} __packed;

/* POLLRDMWR entry: poll, read-modify-write, poll again */
struct __pollrdmwr {
	u32 addr1;
	u32 addr2;
	u32 val1;
	u32 val2;
	u32 poll_wait;
	u32 poll_mask;
	u32 mod_mask;
	u32 data_size;
} __packed;

/* One template entry: common header plus the opcode-specific region */
struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb crb;
		struct __cache cache;
		struct __ocm ocm;
		struct __mem mem;
		struct __mux mux;
		struct __queue que;
		struct __ctrl ctrl;
		struct __pollrdmwr pollrdmwr;
		struct __mux2 mux2;
		struct __pollrd pollrd;
	} region;
} __packed;
/* Opcode of each minidump template entry (qlcnic_common_entry_hdr.type) */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_POLL_RD	= 35,
	QLCNIC_READ_MUX2	= 36,
	QLCNIC_READ_POLLRDMWR	= 37,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};

/* Maps a template opcode to the handler that captures that entry.
 * A handler returns the number of bytes it wrote to the dump buffer.
 */
struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};
  198. static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
  199. struct qlcnic_dump_entry *entry, __le32 *buffer)
  200. {
  201. int i;
  202. u32 addr, data;
  203. struct __crb *crb = &entry->region.crb;
  204. addr = crb->addr;
  205. for (i = 0; i < crb->no_ops; i++) {
  206. data = qlcnic_ind_rd(adapter, addr);
  207. *buffer++ = cpu_to_le32(addr);
  208. *buffer++ = cpu_to_le32(data);
  209. addr += crb->stride;
  210. }
  211. return crb->no_ops * 2 * sizeof(u32);
  212. }
  213. static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
  214. struct qlcnic_dump_entry *entry, __le32 *buffer)
  215. {
  216. int i, k, timeout = 0;
  217. u32 addr, data;
  218. u8 no_ops;
  219. struct __ctrl *ctr = &entry->region.ctrl;
  220. struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
  221. addr = ctr->addr;
  222. no_ops = ctr->no_ops;
  223. for (i = 0; i < no_ops; i++) {
  224. k = 0;
  225. for (k = 0; k < 8; k++) {
  226. if (!(ctr->opcode & (1 << k)))
  227. continue;
  228. switch (1 << k) {
  229. case QLCNIC_DUMP_WCRB:
  230. qlcnic_ind_wr(adapter, addr, ctr->val1);
  231. break;
  232. case QLCNIC_DUMP_RWCRB:
  233. data = qlcnic_ind_rd(adapter, addr);
  234. qlcnic_ind_wr(adapter, addr, data);
  235. break;
  236. case QLCNIC_DUMP_ANDCRB:
  237. data = qlcnic_ind_rd(adapter, addr);
  238. qlcnic_ind_wr(adapter, addr,
  239. (data & ctr->val2));
  240. break;
  241. case QLCNIC_DUMP_ORCRB:
  242. data = qlcnic_ind_rd(adapter, addr);
  243. qlcnic_ind_wr(adapter, addr,
  244. (data | ctr->val3));
  245. break;
  246. case QLCNIC_DUMP_POLLCRB:
  247. while (timeout <= ctr->timeout) {
  248. data = qlcnic_ind_rd(adapter, addr);
  249. if ((data & ctr->val2) == ctr->val1)
  250. break;
  251. usleep_range(1000, 2000);
  252. timeout++;
  253. }
  254. if (timeout > ctr->timeout) {
  255. dev_info(&adapter->pdev->dev,
  256. "Timed out, aborting poll CRB\n");
  257. return -EINVAL;
  258. }
  259. break;
  260. case QLCNIC_DUMP_RD_SAVE:
  261. if (ctr->index_a)
  262. addr = t_hdr->saved_state[ctr->index_a];
  263. data = qlcnic_ind_rd(adapter, addr);
  264. t_hdr->saved_state[ctr->index_v] = data;
  265. break;
  266. case QLCNIC_DUMP_WRT_SAVED:
  267. if (ctr->index_v)
  268. data = t_hdr->saved_state[ctr->index_v];
  269. else
  270. data = ctr->val1;
  271. if (ctr->index_a)
  272. addr = t_hdr->saved_state[ctr->index_a];
  273. qlcnic_ind_wr(adapter, addr, data);
  274. break;
  275. case QLCNIC_DUMP_MOD_SAVE_ST:
  276. data = t_hdr->saved_state[ctr->index_v];
  277. data <<= ctr->shl_val;
  278. data >>= ctr->shr_val;
  279. if (ctr->val2)
  280. data &= ctr->val2;
  281. data |= ctr->val3;
  282. data += ctr->val1;
  283. t_hdr->saved_state[ctr->index_v] = data;
  284. break;
  285. default:
  286. dev_info(&adapter->pdev->dev,
  287. "Unknown opcode\n");
  288. break;
  289. }
  290. }
  291. addr += ctr->stride;
  292. }
  293. return 0;
  294. }
  295. static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
  296. struct qlcnic_dump_entry *entry, __le32 *buffer)
  297. {
  298. int loop;
  299. u32 val, data = 0;
  300. struct __mux *mux = &entry->region.mux;
  301. val = mux->val;
  302. for (loop = 0; loop < mux->no_ops; loop++) {
  303. qlcnic_ind_wr(adapter, mux->addr, val);
  304. data = qlcnic_ind_rd(adapter, mux->read_addr);
  305. *buffer++ = cpu_to_le32(val);
  306. *buffer++ = cpu_to_le32(data);
  307. val += mux->val_stride;
  308. }
  309. return 2 * mux->no_ops * sizeof(u32);
  310. }
  311. static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
  312. struct qlcnic_dump_entry *entry, __le32 *buffer)
  313. {
  314. int i, loop;
  315. u32 cnt, addr, data, que_id = 0;
  316. struct __queue *que = &entry->region.que;
  317. addr = que->read_addr;
  318. cnt = que->read_addr_cnt;
  319. for (loop = 0; loop < que->no_ops; loop++) {
  320. qlcnic_ind_wr(adapter, que->sel_addr, que_id);
  321. addr = que->read_addr;
  322. for (i = 0; i < cnt; i++) {
  323. data = qlcnic_ind_rd(adapter, addr);
  324. *buffer++ = cpu_to_le32(data);
  325. addr += que->read_addr_stride;
  326. }
  327. que_id += que->stride;
  328. }
  329. return que->no_ops * cnt * sizeof(u32);
  330. }
  331. static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
  332. struct qlcnic_dump_entry *entry, __le32 *buffer)
  333. {
  334. int i;
  335. u32 data;
  336. void __iomem *addr;
  337. struct __ocm *ocm = &entry->region.ocm;
  338. addr = adapter->ahw->pci_base0 + ocm->read_addr;
  339. for (i = 0; i < ocm->no_ops; i++) {
  340. data = readl(addr);
  341. *buffer++ = cpu_to_le32(data);
  342. addr += ocm->read_addr_stride;
  343. }
  344. return ocm->no_ops * sizeof(u32);
  345. }
  346. static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
  347. struct qlcnic_dump_entry *entry, __le32 *buffer)
  348. {
  349. int i, count = 0;
  350. u32 fl_addr, size, val, lck_val, addr;
  351. struct __mem *rom = &entry->region.mem;
  352. fl_addr = rom->addr;
  353. size = rom->size / 4;
  354. lock_try:
  355. lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
  356. if (!lck_val && count < MAX_CTL_CHECK) {
  357. usleep_range(10000, 11000);
  358. count++;
  359. goto lock_try;
  360. }
  361. QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
  362. adapter->ahw->pci_func);
  363. for (i = 0; i < size; i++) {
  364. addr = fl_addr & 0xFFFF0000;
  365. qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
  366. addr = LSW(fl_addr) + FLASH_ROM_DATA;
  367. val = qlcnic_ind_rd(adapter, addr);
  368. fl_addr += 4;
  369. *buffer++ = cpu_to_le32(val);
  370. }
  371. QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
  372. return rom->size;
  373. }
  374. static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  375. struct qlcnic_dump_entry *entry, __le32 *buffer)
  376. {
  377. int i;
  378. u32 cnt, val, data, addr;
  379. struct __cache *l1 = &entry->region.cache;
  380. val = l1->init_tag_val;
  381. for (i = 0; i < l1->no_ops; i++) {
  382. qlcnic_ind_wr(adapter, l1->addr, val);
  383. qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
  384. addr = l1->read_addr;
  385. cnt = l1->read_addr_num;
  386. while (cnt) {
  387. data = qlcnic_ind_rd(adapter, addr);
  388. *buffer++ = cpu_to_le32(data);
  389. addr += l1->read_addr_stride;
  390. cnt--;
  391. }
  392. val += l1->stride;
  393. }
  394. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  395. }
  396. static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
  397. struct qlcnic_dump_entry *entry, __le32 *buffer)
  398. {
  399. int i;
  400. u32 cnt, val, data, addr;
  401. u8 poll_mask, poll_to, time_out = 0;
  402. struct __cache *l2 = &entry->region.cache;
  403. val = l2->init_tag_val;
  404. poll_mask = LSB(MSW(l2->ctrl_val));
  405. poll_to = MSB(MSW(l2->ctrl_val));
  406. for (i = 0; i < l2->no_ops; i++) {
  407. qlcnic_ind_wr(adapter, l2->addr, val);
  408. if (LSW(l2->ctrl_val))
  409. qlcnic_ind_wr(adapter, l2->ctrl_addr,
  410. LSW(l2->ctrl_val));
  411. if (!poll_mask)
  412. goto skip_poll;
  413. do {
  414. data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
  415. if (!(data & poll_mask))
  416. break;
  417. usleep_range(1000, 2000);
  418. time_out++;
  419. } while (time_out <= poll_to);
  420. if (time_out > poll_to) {
  421. dev_err(&adapter->pdev->dev,
  422. "Timeout exceeded in %s, aborting dump\n",
  423. __func__);
  424. return -EINVAL;
  425. }
  426. skip_poll:
  427. addr = l2->read_addr;
  428. cnt = l2->read_addr_num;
  429. while (cnt) {
  430. data = qlcnic_ind_rd(adapter, addr);
  431. *buffer++ = cpu_to_le32(data);
  432. addr += l2->read_addr_stride;
  433. cnt--;
  434. }
  435. val += l2->stride;
  436. }
  437. return l2->no_ops * l2->read_addr_num * sizeof(u32);
  438. }
  439. static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
  440. struct __mem *mem, __le32 *buffer,
  441. int *ret)
  442. {
  443. u32 addr, data, test;
  444. int i, reg_read;
  445. reg_read = mem->size;
  446. addr = mem->addr;
  447. /* check for data size of multiple of 16 and 16 byte alignment */
  448. if ((addr & 0xf) || (reg_read%16)) {
  449. dev_info(&adapter->pdev->dev,
  450. "Unaligned memory addr:0x%x size:0x%x\n",
  451. addr, reg_read);
  452. *ret = -EINVAL;
  453. return 0;
  454. }
  455. mutex_lock(&adapter->ahw->mem_lock);
  456. while (reg_read != 0) {
  457. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
  458. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
  459. qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
  460. for (i = 0; i < MAX_CTL_CHECK; i++) {
  461. test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
  462. if (!(test & TA_CTL_BUSY))
  463. break;
  464. }
  465. if (i == MAX_CTL_CHECK) {
  466. if (printk_ratelimit()) {
  467. dev_err(&adapter->pdev->dev,
  468. "failed to read through agent\n");
  469. *ret = -EIO;
  470. goto out;
  471. }
  472. }
  473. for (i = 0; i < 4; i++) {
  474. data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
  475. *buffer++ = cpu_to_le32(data);
  476. }
  477. addr += 16;
  478. reg_read -= 16;
  479. ret += 16;
  480. }
  481. out:
  482. mutex_unlock(&adapter->ahw->mem_lock);
  483. return mem->size;
  484. }
  485. /* DMA register base address */
  486. #define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
  487. /* DMA register offsets w.r.t base address */
  488. #define QLC_DMA_CMD_BUFF_ADDR_LOW 0
  489. #define QLC_DMA_CMD_BUFF_ADDR_HI 4
  490. #define QLC_DMA_CMD_STATUS_CTRL 8
  491. #define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
  492. static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
  493. struct __mem *mem)
  494. {
  495. struct qlcnic_dump_template_hdr *tmpl_hdr;
  496. struct device *dev = &adapter->pdev->dev;
  497. u32 dma_no, dma_base_addr, temp_addr;
  498. int i, ret, dma_sts;
  499. tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
  500. dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
  501. dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
  502. temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
  503. ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
  504. mem->desc_card_addr);
  505. if (ret)
  506. return ret;
  507. temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
  508. ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
  509. if (ret)
  510. return ret;
  511. temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
  512. ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
  513. mem->start_dma_cmd);
  514. if (ret)
  515. return ret;
  516. /* Wait for DMA to complete */
  517. temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
  518. for (i = 0; i < 400; i++) {
  519. dma_sts = qlcnic_ind_rd(adapter, temp_addr);
  520. if (dma_sts & BIT_1)
  521. usleep_range(250, 500);
  522. else
  523. break;
  524. }
  525. if (i >= 400) {
  526. dev_info(dev, "PEX DMA operation timed out");
  527. ret = -EIO;
  528. }
  529. return ret;
  530. }
  531. static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
  532. struct __mem *mem,
  533. __le32 *buffer, int *ret)
  534. {
  535. struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
  536. u32 temp, dma_base_addr, size = 0, read_size = 0;
  537. struct qlcnic_pex_dma_descriptor *dma_descr;
  538. struct qlcnic_dump_template_hdr *tmpl_hdr;
  539. struct device *dev = &adapter->pdev->dev;
  540. dma_addr_t dma_phys_addr;
  541. void *dma_buffer;
  542. tmpl_hdr = fw_dump->tmpl_hdr;
  543. /* Check if DMA engine is available */
  544. temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
  545. dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
  546. temp = qlcnic_ind_rd(adapter,
  547. dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
  548. if (!(temp & BIT_31)) {
  549. dev_info(dev, "%s: DMA engine is not available\n", __func__);
  550. *ret = -EIO;
  551. return 0;
  552. }
  553. /* Create DMA descriptor */
  554. dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
  555. GFP_KERNEL);
  556. if (!dma_descr) {
  557. *ret = -ENOMEM;
  558. return 0;
  559. }
  560. /* dma_desc_cmd 0:15 = 0
  561. * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
  562. * dma_desc_cmd 20:23 = pci function number
  563. * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
  564. */
  565. dma_phys_addr = fw_dump->phys_addr;
  566. dma_buffer = fw_dump->dma_buffer;
  567. temp = 0;
  568. temp = mem->dma_desc_cmd & 0xff0f;
  569. temp |= (adapter->ahw->pci_func & 0xf) << 4;
  570. dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
  571. dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
  572. dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
  573. dma_descr->src_addr_high = 0;
  574. /* Collect memory dump using multiple DMA operations if required */
  575. while (read_size < mem->size) {
  576. if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
  577. size = QLC_PEX_DMA_READ_SIZE;
  578. else
  579. size = mem->size - read_size;
  580. dma_descr->src_addr_low = mem->addr + read_size;
  581. dma_descr->read_data_size = size;
  582. /* Write DMA descriptor to MS memory*/
  583. temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
  584. *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
  585. (u32 *)dma_descr, temp);
  586. if (*ret) {
  587. dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
  588. mem->desc_card_addr);
  589. goto free_dma_descr;
  590. }
  591. *ret = qlcnic_start_pex_dma(adapter, mem);
  592. if (*ret) {
  593. dev_info(dev, "Failed to start PEX DMA operation\n");
  594. goto free_dma_descr;
  595. }
  596. memcpy(buffer, dma_buffer, size);
  597. buffer += size / 4;
  598. read_size += size;
  599. }
  600. free_dma_descr:
  601. kfree(dma_descr);
  602. return read_size;
  603. }
  604. static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
  605. struct qlcnic_dump_entry *entry, __le32 *buffer)
  606. {
  607. struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
  608. struct device *dev = &adapter->pdev->dev;
  609. struct __mem *mem = &entry->region.mem;
  610. u32 data_size;
  611. int ret = 0;
  612. if (fw_dump->use_pex_dma) {
  613. data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
  614. &ret);
  615. if (ret)
  616. dev_info(dev,
  617. "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
  618. entry->hdr.mask);
  619. else
  620. return data_size;
  621. }
  622. data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
  623. if (ret) {
  624. dev_info(dev,
  625. "Failed to read memory dump using test agent method: mask[0x%x]\n",
  626. entry->hdr.mask);
  627. return 0;
  628. } else {
  629. return data_size;
  630. }
  631. }
  632. static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
  633. struct qlcnic_dump_entry *entry, __le32 *buffer)
  634. {
  635. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  636. return 0;
  637. }
  638. static int qlcnic_valid_dump_entry(struct device *dev,
  639. struct qlcnic_dump_entry *entry, u32 size)
  640. {
  641. int ret = 1;
  642. if (size != entry->hdr.cap_size) {
  643. dev_err(dev,
  644. "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  645. entry->hdr.type, entry->hdr.mask, size,
  646. entry->hdr.cap_size);
  647. ret = 0;
  648. }
  649. return ret;
  650. }
  651. static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
  652. struct qlcnic_dump_entry *entry,
  653. __le32 *buffer)
  654. {
  655. struct __pollrdmwr *poll = &entry->region.pollrdmwr;
  656. u32 data, wait_count, poll_wait, temp;
  657. poll_wait = poll->poll_wait;
  658. qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
  659. wait_count = 0;
  660. while (wait_count < poll_wait) {
  661. data = qlcnic_ind_rd(adapter, poll->addr1);
  662. if ((data & poll->poll_mask) != 0)
  663. break;
  664. wait_count++;
  665. }
  666. if (wait_count == poll_wait) {
  667. dev_err(&adapter->pdev->dev,
  668. "Timeout exceeded in %s, aborting dump\n",
  669. __func__);
  670. return 0;
  671. }
  672. data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
  673. qlcnic_ind_wr(adapter, poll->addr2, data);
  674. qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
  675. wait_count = 0;
  676. while (wait_count < poll_wait) {
  677. temp = qlcnic_ind_rd(adapter, poll->addr1);
  678. if ((temp & poll->poll_mask) != 0)
  679. break;
  680. wait_count++;
  681. }
  682. *buffer++ = cpu_to_le32(poll->addr2);
  683. *buffer++ = cpu_to_le32(data);
  684. return 2 * sizeof(u32);
  685. }
  686. static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
  687. struct qlcnic_dump_entry *entry, __le32 *buffer)
  688. {
  689. struct __pollrd *pollrd = &entry->region.pollrd;
  690. u32 data, wait_count, poll_wait, sel_val;
  691. int i;
  692. poll_wait = pollrd->poll_wait;
  693. sel_val = pollrd->sel_val;
  694. for (i = 0; i < pollrd->no_ops; i++) {
  695. qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
  696. wait_count = 0;
  697. while (wait_count < poll_wait) {
  698. data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
  699. if ((data & pollrd->poll_mask) != 0)
  700. break;
  701. wait_count++;
  702. }
  703. if (wait_count == poll_wait) {
  704. dev_err(&adapter->pdev->dev,
  705. "Timeout exceeded in %s, aborting dump\n",
  706. __func__);
  707. return 0;
  708. }
  709. data = qlcnic_ind_rd(adapter, pollrd->read_addr);
  710. *buffer++ = cpu_to_le32(sel_val);
  711. *buffer++ = cpu_to_le32(data);
  712. sel_val += pollrd->sel_val_stride;
  713. }
  714. return pollrd->no_ops * (2 * sizeof(u32));
  715. }
  716. static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
  717. struct qlcnic_dump_entry *entry, __le32 *buffer)
  718. {
  719. struct __mux2 *mux2 = &entry->region.mux2;
  720. u32 data;
  721. u32 t_sel_val, sel_val1, sel_val2;
  722. int i;
  723. sel_val1 = mux2->sel_val1;
  724. sel_val2 = mux2->sel_val2;
  725. for (i = 0; i < mux2->no_ops; i++) {
  726. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
  727. t_sel_val = sel_val1 & mux2->sel_val_mask;
  728. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  729. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  730. *buffer++ = cpu_to_le32(t_sel_val);
  731. *buffer++ = cpu_to_le32(data);
  732. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
  733. t_sel_val = sel_val2 & mux2->sel_val_mask;
  734. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  735. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  736. *buffer++ = cpu_to_le32(t_sel_val);
  737. *buffer++ = cpu_to_le32(data);
  738. sel_val1 += mux2->sel_val_stride;
  739. sel_val2 += mux2->sel_val_stride;
  740. }
  741. return mux2->no_ops * (4 * sizeof(u32));
  742. }
  743. static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
  744. struct qlcnic_dump_entry *entry, __le32 *buffer)
  745. {
  746. u32 fl_addr, size;
  747. struct __mem *rom = &entry->region.mem;
  748. fl_addr = rom->addr;
  749. size = rom->size / 4;
  750. if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
  751. (u8 *)buffer, size))
  752. return rom->size;
  753. return 0;
  754. }
/* Opcode -> handler dispatch table for 82xx adapters */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
/* Opcode -> handler dispatch table for 83xx adapters: adds the POLL_RD,
 * MUX2 and POLLRDMWR entry types and uses the lockless flash ROM reader.
 */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
  802. static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
  803. {
  804. uint64_t sum = 0;
  805. int count = temp_size / sizeof(uint32_t);
  806. while (count-- > 0)
  807. sum += *temp_buffer++;
  808. while (sum >> 32)
  809. sum = (sum & 0xFFFFFFFF) + (sum >> 32);
  810. return ~sum;
  811. }
  812. static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
  813. u8 *buffer, u32 size)
  814. {
  815. int ret = 0;
  816. if (qlcnic_82xx_check(adapter))
  817. return -EIO;
  818. if (qlcnic_83xx_lock_flash(adapter))
  819. return -EIO;
  820. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  821. QLC_83XX_MINIDUMP_FLASH,
  822. buffer, size / sizeof(u32));
  823. qlcnic_83xx_unlock_flash(adapter);
  824. return ret;
  825. }
  826. static int
  827. qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  828. struct qlcnic_cmd_args *cmd)
  829. {
  830. struct qlcnic_dump_template_hdr tmp_hdr;
  831. u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
  832. int ret = 0;
  833. if (qlcnic_82xx_check(adapter))
  834. return -EIO;
  835. if (qlcnic_83xx_lock_flash(adapter))
  836. return -EIO;
  837. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  838. QLC_83XX_MINIDUMP_FLASH,
  839. (u8 *)&tmp_hdr, size);
  840. qlcnic_83xx_unlock_flash(adapter);
  841. cmd->rsp.arg[2] = tmp_hdr.size;
  842. cmd->rsp.arg[3] = tmp_hdr.version;
  843. return ret;
  844. }
  845. static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  846. u32 *version, u32 *temp_size,
  847. u8 *use_flash_temp)
  848. {
  849. int err = 0;
  850. struct qlcnic_cmd_args cmd;
  851. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
  852. return -ENOMEM;
  853. err = qlcnic_issue_cmd(adapter, &cmd);
  854. if (err != QLCNIC_RCODE_SUCCESS) {
  855. if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
  856. qlcnic_free_mbx_args(&cmd);
  857. return -EIO;
  858. }
  859. *use_flash_temp = 1;
  860. }
  861. *temp_size = cmd.rsp.arg[2];
  862. *version = cmd.rsp.arg[3];
  863. qlcnic_free_mbx_args(&cmd);
  864. if (!(*temp_size))
  865. return -EIO;
  866. return 0;
  867. }
  868. static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
  869. u32 *buffer, u32 temp_size)
  870. {
  871. int err = 0, i;
  872. void *tmp_addr;
  873. __le32 *tmp_buf;
  874. struct qlcnic_cmd_args cmd;
  875. dma_addr_t tmp_addr_t = 0;
  876. tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
  877. &tmp_addr_t, GFP_KERNEL);
  878. if (!tmp_addr)
  879. return -ENOMEM;
  880. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
  881. err = -ENOMEM;
  882. goto free_mem;
  883. }
  884. cmd.req.arg[1] = LSD(tmp_addr_t);
  885. cmd.req.arg[2] = MSD(tmp_addr_t);
  886. cmd.req.arg[3] = temp_size;
  887. err = qlcnic_issue_cmd(adapter, &cmd);
  888. tmp_buf = tmp_addr;
  889. if (err == QLCNIC_RCODE_SUCCESS) {
  890. for (i = 0; i < temp_size / sizeof(u32); i++)
  891. *buffer++ = __le32_to_cpu(*tmp_buf++);
  892. }
  893. qlcnic_free_mbx_args(&cmd);
  894. free_mem:
  895. dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
  896. return err;
  897. }
  898. int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
  899. {
  900. int err;
  901. u32 temp_size = 0;
  902. u32 version, csum, *tmp_buf;
  903. struct qlcnic_hardware_context *ahw;
  904. struct qlcnic_dump_template_hdr *tmpl_hdr;
  905. u8 use_flash_temp = 0;
  906. ahw = adapter->ahw;
  907. err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
  908. &use_flash_temp);
  909. if (err) {
  910. dev_err(&adapter->pdev->dev,
  911. "Can't get template size %d\n", err);
  912. return -EIO;
  913. }
  914. ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
  915. if (!ahw->fw_dump.tmpl_hdr)
  916. return -ENOMEM;
  917. tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
  918. if (use_flash_temp)
  919. goto flash_temp;
  920. err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
  921. if (err) {
  922. flash_temp:
  923. err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
  924. temp_size);
  925. if (err) {
  926. dev_err(&adapter->pdev->dev,
  927. "Failed to get minidump template header %d\n",
  928. err);
  929. vfree(ahw->fw_dump.tmpl_hdr);
  930. ahw->fw_dump.tmpl_hdr = NULL;
  931. return -EIO;
  932. }
  933. }
  934. csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
  935. if (csum) {
  936. dev_err(&adapter->pdev->dev,
  937. "Template header checksum validation failed\n");
  938. vfree(ahw->fw_dump.tmpl_hdr);
  939. ahw->fw_dump.tmpl_hdr = NULL;
  940. return -EIO;
  941. }
  942. tmpl_hdr = ahw->fw_dump.tmpl_hdr;
  943. tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
  944. dev_info(&adapter->pdev->dev,
  945. "Default minidump capture mask 0x%x\n",
  946. tmpl_hdr->cap_mask);
  947. if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
  948. ahw->fw_dump.use_pex_dma = true;
  949. else
  950. ahw->fw_dump.use_pex_dma = false;
  951. qlcnic_enable_fw_dump_state(adapter);
  952. return 0;
  953. }
  954. int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
  955. {
  956. __le32 *buffer;
  957. u32 ocm_window;
  958. char mesg[64];
  959. char *msg[] = {mesg, NULL};
  960. int i, k, ops_cnt, ops_index, dump_size = 0;
  961. u32 entry_offset, dump, no_entries, buf_offset = 0;
  962. struct qlcnic_dump_entry *entry;
  963. struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
  964. struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
  965. static const struct qlcnic_dump_operations *fw_dump_ops;
  966. struct device *dev = &adapter->pdev->dev;
  967. struct qlcnic_hardware_context *ahw;
  968. void *temp_buffer;
  969. ahw = adapter->ahw;
  970. /* Return if we don't have firmware dump template header */
  971. if (!tmpl_hdr)
  972. return -EIO;
  973. if (!qlcnic_check_fw_dump_state(adapter)) {
  974. dev_info(&adapter->pdev->dev, "Dump not enabled\n");
  975. return -EIO;
  976. }
  977. if (fw_dump->clr) {
  978. dev_info(&adapter->pdev->dev,
  979. "Previous dump not cleared, not capturing dump\n");
  980. return -EIO;
  981. }
  982. netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
  983. /* Calculate the size for dump data area only */
  984. for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
  985. if (i & tmpl_hdr->drv_cap_mask)
  986. dump_size += tmpl_hdr->cap_sizes[k];
  987. if (!dump_size)
  988. return -EIO;
  989. fw_dump->data = vzalloc(dump_size);
  990. if (!fw_dump->data)
  991. return -ENOMEM;
  992. buffer = fw_dump->data;
  993. fw_dump->size = dump_size;
  994. no_entries = tmpl_hdr->num_entries;
  995. entry_offset = tmpl_hdr->offset;
  996. tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
  997. tmpl_hdr->sys_info[1] = adapter->fw_version;
  998. if (fw_dump->use_pex_dma) {
  999. temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
  1000. &fw_dump->phys_addr,
  1001. GFP_KERNEL);
  1002. if (!temp_buffer)
  1003. fw_dump->use_pex_dma = false;
  1004. else
  1005. fw_dump->dma_buffer = temp_buffer;
  1006. }
  1007. if (qlcnic_82xx_check(adapter)) {
  1008. ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
  1009. fw_dump_ops = qlcnic_fw_dump_ops;
  1010. } else {
  1011. ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
  1012. fw_dump_ops = qlcnic_83xx_fw_dump_ops;
  1013. ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
  1014. tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
  1015. tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
  1016. }
  1017. for (i = 0; i < no_entries; i++) {
  1018. entry = (void *)tmpl_hdr + entry_offset;
  1019. if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
  1020. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  1021. entry_offset += entry->hdr.offset;
  1022. continue;
  1023. }
  1024. /* Find the handler for this entry */
  1025. ops_index = 0;
  1026. while (ops_index < ops_cnt) {
  1027. if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
  1028. break;
  1029. ops_index++;
  1030. }
  1031. if (ops_index == ops_cnt) {
  1032. dev_info(&adapter->pdev->dev,
  1033. "Invalid entry type %d, exiting dump\n",
  1034. entry->hdr.type);
  1035. goto error;
  1036. }
  1037. /* Collect dump for this entry */
  1038. dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
  1039. if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
  1040. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  1041. buf_offset += entry->hdr.cap_size;
  1042. entry_offset += entry->hdr.offset;
  1043. buffer = fw_dump->data + buf_offset;
  1044. }
  1045. if (dump_size != buf_offset) {
  1046. dev_info(&adapter->pdev->dev,
  1047. "Captured(%d) and expected size(%d) do not match\n",
  1048. buf_offset, dump_size);
  1049. goto error;
  1050. } else {
  1051. fw_dump->clr = 1;
  1052. snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
  1053. adapter->netdev->name);
  1054. dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
  1055. adapter->netdev->name, fw_dump->size);
  1056. /* Send a udev event to notify availability of FW dump */
  1057. kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
  1058. return 0;
  1059. }
  1060. error:
  1061. if (fw_dump->use_pex_dma)
  1062. dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
  1063. fw_dump->dma_buffer, fw_dump->phys_addr);
  1064. vfree(fw_dump->data);
  1065. return -EINVAL;
  1066. }
  1067. void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
  1068. {
  1069. u32 prev_version, current_version;
  1070. struct qlcnic_hardware_context *ahw = adapter->ahw;
  1071. struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
  1072. struct pci_dev *pdev = adapter->pdev;
  1073. prev_version = adapter->fw_version;
  1074. current_version = qlcnic_83xx_get_fw_version(adapter);
  1075. if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
  1076. if (fw_dump->tmpl_hdr)
  1077. vfree(fw_dump->tmpl_hdr);
  1078. if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
  1079. dev_info(&pdev->dev, "Supports FW dump capability\n");
  1080. }
  1081. }