/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
  7. #include "qlcnic.h"
  8. #include "qlcnic_hdr.h"
  9. #include "qlcnic_83xx_hw.h"
  10. #include "qlcnic_hw.h"
  11. #include <net/ip.h>
/* Flash offset where the 83xx minidump template is stored. */
#define QLC_83XX_MINIDUMP_FLASH		0x520000
/* Indices into the template's saved_state[] scratch area (see qlcnic_dump_fw). */
#define QLC_83XX_OCM_INDEX		3
#define QLC_83XX_PCI_INDEX		0

/* CRB addresses of the four 32-bit MS agent read-data registers drained
 * by qlcnic_read_memory() after each 16-byte agent read. */
static const u32 qlcnic_ms_read_data[] = {
	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};

/* Opcode bits interpreted by qlcnic_dump_ctrl(); more than one may be
 * set in a single control entry and they are applied in bit order. */
#define QLCNIC_DUMP_WCRB	BIT_0	/* write val1 */
#define QLCNIC_DUMP_RWCRB	BIT_1	/* read, write back same value */
#define QLCNIC_DUMP_ANDCRB	BIT_2	/* read, AND with val2, write */
#define QLCNIC_DUMP_ORCRB	BIT_3	/* read, OR with val3, write */
#define QLCNIC_DUMP_POLLCRB	BIT_4	/* poll until (data & val2) == val1 */
#define QLCNIC_DUMP_RD_SAVE	BIT_5	/* read into saved_state[index_v] */
#define QLCNIC_DUMP_WRT_SAVED	BIT_6	/* write from saved_state/val1 */
#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7	/* transform saved_state[index_v] */
/* Entry-header flag (not a ctrl opcode): entry was skipped/invalid. */
#define QLCNIC_DUMP_SKIP	BIT_7
#define QLCNIC_DUMP_MASK_MAX	0xff

/*
 * On-wire minidump template layouts.  Every dump entry begins with the
 * common header below followed by an opcode-specific region; all of the
 * structures are __packed because they mirror the firmware template
 * byte-for-byte.
 */
struct qlcnic_common_entry_hdr {
	u32 type;	/* opcode, see enum qlcnic_minidump_opcode */
	u32 offset;	/* distance in bytes to the next template entry */
	u32 cap_size;	/* expected capture size, checked against handler return */
	u8 mask;	/* capture class; entry skipped unless enabled in drv_cap_mask */
	u8 rsvd[2];
	u8 flags;	/* QLCNIC_DUMP_SKIP set when the entry was not captured */
} __packed;

/* CRB register range read by qlcnic_dump_crb(). */
struct __crb {
	u32 addr;
	u8 stride;
	u8 rsvd1[3];
	u32 data_size;
	u32 no_ops;
	u32 rsvd2[4];
} __packed;

/* Control entry executed (not captured) by qlcnic_dump_ctrl(). */
struct __ctrl {
	u32 addr;
	u8 stride;
	u8 index_a;	/* saved_state index supplying the address, if nonzero */
	u16 timeout;	/* poll limit, in ~1ms steps */
	u32 data_size;
	u32 no_ops;
	u8 opcode;	/* bitmask of QLCNIC_DUMP_* opcode bits */
	u8 index_v;	/* saved_state index for the value */
	u8 shl_val;
	u8 shr_val;
	u32 val1;
	u32 val2;
	u32 val3;
} __packed;

/* L1/L2 cache capture descriptor (qlcnic_dump_l1_cache/l2_cache). */
struct __cache {
	u32 addr;
	u16 stride;
	u16 init_tag_val;
	u32 size;
	u32 no_ops;
	u32 ctrl_addr;
	u32 ctrl_val;
	u32 read_addr;
	u8 read_addr_stride;
	u8 read_addr_num;
	u8 rsvd1[2];
} __packed;

/* On-chip memory capture descriptor (qlcnic_dump_ocm). */
struct __ocm {
	u8 rsvd[8];
	u32 size;
	u32 no_ops;
	u8 rsvd1[8];
	u32 read_addr;
	u32 read_addr_stride;
} __packed;

/* Flat memory/ROM region (qlcnic_read_memory, qlcnic_read_rom). */
struct __mem {
	u8 rsvd[24];
	u32 addr;
	u32 size;
} __packed;

/* Muxed register capture: write select value, read data (qlcnic_dump_mux). */
struct __mux {
	u32 addr;	/* select register */
	u8 rsvd[4];
	u32 size;
	u32 no_ops;
	u32 val;	/* initial select value */
	u32 val_stride;
	u32 read_addr;
	u8 rsvd2[4];
} __packed;

/* Hardware queue capture (qlcnic_dump_que). */
struct __queue {
	u32 sel_addr;
	u16 stride;
	u8 rsvd[2];
	u32 size;
	u32 no_ops;
	u8 rsvd2[8];
	u32 read_addr;
	u8 read_addr_stride;
	u8 read_addr_cnt;
	u8 rsvd3[2];
} __packed;

/* Poll-then-read capture, 83xx only (qlcnic_read_pollrd). */
struct __pollrd {
	u32 sel_addr;
	u32 read_addr;
	u32 sel_val;
	u16 sel_val_stride;
	u16 no_ops;
	u32 poll_wait;
	u32 poll_mask;
	u32 data_size;
	u8 rsvd[4];
} __packed;

/* Dual-select mux capture, 83xx only (qlcnic_read_mux2). */
struct __mux2 {
	u32 sel_addr1;
	u32 sel_addr2;
	u32 sel_val1;
	u32 sel_val2;
	u32 no_ops;
	u32 sel_val_mask;
	u32 read_addr;
	u8 sel_val_stride;
	u8 data_size;
	u8 rsvd[2];
} __packed;

/* Poll / read-modify-write / poll sequence, 83xx only (qlcnic_read_pollrdmwr). */
struct __pollrdmwr {
	u32 addr1;
	u32 addr2;
	u32 val1;
	u32 val2;
	u32 poll_wait;
	u32 poll_mask;
	u32 mod_mask;
	u32 data_size;
} __packed;

/* One template entry: common header plus the opcode-specific region. */
struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb crb;
		struct __cache cache;
		struct __ocm ocm;
		struct __mem mem;
		struct __mux mux;
		struct __queue que;
		struct __ctrl ctrl;
		struct __pollrdmwr pollrdmwr;
		struct __mux2 mux2;
		struct __pollrd pollrd;
	} region;
} __packed;
/* Template entry opcodes understood by the dump engine.  Values are
 * assigned by the firmware template format; gaps are intentional. */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_POLL_RD	= 35,
	QLCNIC_READ_MUX2	= 36,
	QLCNIC_READ_POLLRDMWR	= 37,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};

/* Maps a template opcode to its capture handler.  A handler returns the
 * number of bytes it wrote to the buffer, which qlcnic_dump_fw()
 * validates against the entry's cap_size. */
struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};
  185. static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
  186. struct qlcnic_dump_entry *entry, __le32 *buffer)
  187. {
  188. int i;
  189. u32 addr, data;
  190. struct __crb *crb = &entry->region.crb;
  191. addr = crb->addr;
  192. for (i = 0; i < crb->no_ops; i++) {
  193. data = qlcnic_ind_rd(adapter, addr);
  194. *buffer++ = cpu_to_le32(addr);
  195. *buffer++ = cpu_to_le32(data);
  196. addr += crb->stride;
  197. }
  198. return crb->no_ops * 2 * sizeof(u32);
  199. }
/*
 * qlcnic_dump_ctrl - execute a control entry from the dump template.
 *
 * Each of the low eight bits of ctr->opcode selects one operation; all
 * set bits are applied in ascending bit order at each address, and the
 * address advances by ctr->stride between the no_ops iterations.  The
 * save/restore opcodes use the template header's saved_state[] array as
 * a scratch area shared with other entries.
 *
 * Produces no dump data; returns 0 on success.
 * NOTE(review): on poll timeout this returns -EINVAL through a u32
 * return type, so the caller sees it as a size mismatch (entry marked
 * skipped) rather than a negative errno — confirm this is intended.
 */
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
			    struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, k, timeout = 0;
	u32 addr, data;
	u8 no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		k = 0;
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				/* plain write of val1 */
				qlcnic_ind_wr(adapter, addr, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				/* read-and-write-back of the same value */
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr,
					      (data & ctr->val2));
				break;
			case QLCNIC_DUMP_ORCRB:
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr,
					      (data | ctr->val3));
				break;
			case QLCNIC_DUMP_POLLCRB:
				/* wait until masked value matches val1;
				 * timeout accumulates across operations */
				while (timeout <= ctr->timeout) {
					data = qlcnic_ind_rd(adapter, addr);
					if ((data & ctr->val2) == ctr->val1)
						break;
					usleep_range(1000, 2000);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
						 "Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				/* read (possibly indirected) address into
				 * the saved_state scratch slot index_v */
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				data = qlcnic_ind_rd(adapter, addr);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				/* write back either a saved value or val1 */
				if (ctr->index_v)
					data = t_hdr->saved_state[ctr->index_v];
				else
					data = ctr->val1;
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				qlcnic_ind_wr(adapter, addr, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				/* shift/mask/or/add transform of a saved value */
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					 "Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}
  282. static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
  283. struct qlcnic_dump_entry *entry, __le32 *buffer)
  284. {
  285. int loop;
  286. u32 val, data = 0;
  287. struct __mux *mux = &entry->region.mux;
  288. val = mux->val;
  289. for (loop = 0; loop < mux->no_ops; loop++) {
  290. qlcnic_ind_wr(adapter, mux->addr, val);
  291. data = qlcnic_ind_rd(adapter, mux->read_addr);
  292. *buffer++ = cpu_to_le32(val);
  293. *buffer++ = cpu_to_le32(data);
  294. val += mux->val_stride;
  295. }
  296. return 2 * mux->no_ops * sizeof(u32);
  297. }
  298. static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
  299. struct qlcnic_dump_entry *entry, __le32 *buffer)
  300. {
  301. int i, loop;
  302. u32 cnt, addr, data, que_id = 0;
  303. struct __queue *que = &entry->region.que;
  304. addr = que->read_addr;
  305. cnt = que->read_addr_cnt;
  306. for (loop = 0; loop < que->no_ops; loop++) {
  307. qlcnic_ind_wr(adapter, que->sel_addr, que_id);
  308. addr = que->read_addr;
  309. for (i = 0; i < cnt; i++) {
  310. data = qlcnic_ind_rd(adapter, addr);
  311. *buffer++ = cpu_to_le32(data);
  312. addr += que->read_addr_stride;
  313. }
  314. que_id += que->stride;
  315. }
  316. return que->no_ops * cnt * sizeof(u32);
  317. }
/*
 * qlcnic_dump_ocm - capture on-chip memory via MMIO.
 *
 * Reads ocm->no_ops 32-bit words starting at BAR0 + ocm->read_addr,
 * advancing by ocm->read_addr_stride bytes per read.  Returns the
 * number of bytes captured.
 */
static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 data;
	void __iomem *addr;
	struct __ocm *ocm = &entry->region.ocm;

	/* read_addr is an offset into the BAR0 mapping */
	addr = adapter->ahw->pci_base0 + ocm->read_addr;
	for (i = 0; i < ocm->no_ops; i++) {
		data = readl(addr);
		*buffer++ = cpu_to_le32(data);
		addr += ocm->read_addr_stride;
	}
	return ocm->no_ops * sizeof(u32);
}
/*
 * qlcnic_read_rom - capture a flash/ROM region (82xx path).
 *
 * Takes the flash hardware lock (retrying up to MAX_CTL_CHECK times),
 * then reads rom->size bytes a word at a time through the flash ROM
 * window, and releases the lock.  Returns rom->size.
 *
 * NOTE(review): if the lock is never acquired after MAX_CTL_CHECK
 * retries the code falls through and reads anyway — confirm this
 * best-effort behaviour is intended.
 */
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;

	fl_addr = rom->addr;
	size = rom->size / 4;	/* word count */
lock_try:
	lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		usleep_range(10000, 11000);
		count++;
		goto lock_try;
	}
	QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
			    adapter->ahw->pci_func);
	for (i = 0; i < size; i++) {
		/* select the 64K window, then read within it */
		addr = fl_addr & 0xFFFF0000;
		qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		val = qlcnic_ind_rd(adapter, addr);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	/* reading the unlock register releases the flash lock */
	QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
	return rom->size;
}
  361. static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  362. struct qlcnic_dump_entry *entry, __le32 *buffer)
  363. {
  364. int i;
  365. u32 cnt, val, data, addr;
  366. struct __cache *l1 = &entry->region.cache;
  367. val = l1->init_tag_val;
  368. for (i = 0; i < l1->no_ops; i++) {
  369. qlcnic_ind_wr(adapter, l1->addr, val);
  370. qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
  371. addr = l1->read_addr;
  372. cnt = l1->read_addr_num;
  373. while (cnt) {
  374. data = qlcnic_ind_rd(adapter, addr);
  375. *buffer++ = cpu_to_le32(data);
  376. addr += l1->read_addr_stride;
  377. cnt--;
  378. }
  379. val += l1->stride;
  380. }
  381. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  382. }
/*
 * qlcnic_dump_l2_cache - capture L2 cache tag/data arrays.
 *
 * Like the L1 variant, but after kicking the controller it may poll the
 * control register until the bits in poll_mask (byte 2 of ctrl_val)
 * clear, giving up after poll_to (byte 3 of ctrl_val) ~1ms steps.
 * Returns bytes captured.
 * NOTE(review): on poll timeout this returns -EINVAL through a u32
 * return type — the caller treats it as a size mismatch, matching the
 * other handlers.  time_out is not reset per iteration, so the budget
 * is shared across all no_ops operations — confirm intended.
 */
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	/* poll mask and timeout are packed into the high word of ctrl_val */
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		qlcnic_ind_wr(adapter, l2->addr, val);
		if (LSW(l2->ctrl_val))
			qlcnic_ind_wr(adapter, l2->ctrl_addr,
				      LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		do {
			data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
			if (!(data & poll_mask))
				break;
			usleep_range(1000, 2000);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
  426. static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
  427. struct qlcnic_dump_entry *entry, __le32 *buffer)
  428. {
  429. u32 addr, data, test, ret = 0;
  430. int i, reg_read;
  431. struct __mem *mem = &entry->region.mem;
  432. reg_read = mem->size;
  433. addr = mem->addr;
  434. /* check for data size of multiple of 16 and 16 byte alignment */
  435. if ((addr & 0xf) || (reg_read%16)) {
  436. dev_info(&adapter->pdev->dev,
  437. "Unaligned memory addr:0x%x size:0x%x\n",
  438. addr, reg_read);
  439. return -EINVAL;
  440. }
  441. mutex_lock(&adapter->ahw->mem_lock);
  442. while (reg_read != 0) {
  443. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
  444. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
  445. qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
  446. for (i = 0; i < MAX_CTL_CHECK; i++) {
  447. test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
  448. if (!(test & TA_CTL_BUSY))
  449. break;
  450. }
  451. if (i == MAX_CTL_CHECK) {
  452. if (printk_ratelimit()) {
  453. dev_err(&adapter->pdev->dev,
  454. "failed to read through agent\n");
  455. ret = -EINVAL;
  456. goto out;
  457. }
  458. }
  459. for (i = 0; i < 4; i++) {
  460. data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
  461. *buffer++ = cpu_to_le32(data);
  462. }
  463. addr += 16;
  464. reg_read -= 16;
  465. ret += 16;
  466. }
  467. out:
  468. mutex_unlock(&adapter->ahw->mem_lock);
  469. return mem->size;
  470. }
/*
 * qlcnic_dump_nop - handler for entries that carry no data (NOP, the
 * template header, end-of-dump).  Marks the entry skipped and captures
 * nothing.
 */
static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}
  477. static int qlcnic_valid_dump_entry(struct device *dev,
  478. struct qlcnic_dump_entry *entry, u32 size)
  479. {
  480. int ret = 1;
  481. if (size != entry->hdr.cap_size) {
  482. dev_err(dev,
  483. "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  484. entry->hdr.type, entry->hdr.mask, size,
  485. entry->hdr.cap_size);
  486. ret = 0;
  487. }
  488. return ret;
  489. }
/*
 * qlcnic_read_pollrdmwr - poll / read-modify-write / poll capture (83xx).
 *
 * Kicks addr1 with val1 and polls until poll_mask bits appear, then
 * reads addr2 masked with mod_mask, writes the result back, kicks addr1
 * again with val2 and re-polls.  Stores (addr2, data) in the buffer and
 * returns 8 bytes on success, 0 on first-poll timeout (caller flags the
 * entry as a size mismatch).
 *
 * NOTE(review): the second poll loop's timeout result is not checked —
 * on expiry the capture proceeds anyway; confirm this is intended.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
				 struct qlcnic_dump_entry *entry,
				 __le32 *buffer)
{
	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
	u32 data, wait_count, poll_wait, temp;

	poll_wait = poll->poll_wait;
	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);

	wait_count = 0;
	while (wait_count < poll_wait) {
		data = qlcnic_ind_rd(adapter, poll->addr1);
		if ((data & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	if (wait_count == poll_wait) {
		dev_err(&adapter->pdev->dev,
			"Timeout exceeded in %s, aborting dump\n",
			__func__);
		return 0;
	}

	/* read-modify-write of the target register */
	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
	qlcnic_ind_wr(adapter, poll->addr2, data);
	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);

	wait_count = 0;
	while (wait_count < poll_wait) {
		temp = qlcnic_ind_rd(adapter, poll->addr1);
		if ((temp & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	*buffer++ = cpu_to_le32(poll->addr2);
	*buffer++ = cpu_to_le32(data);

	return 2 * sizeof(u32);
}
/*
 * qlcnic_read_pollrd - select, poll, then read capture (83xx).
 *
 * For each of no_ops selections: write sel_val to sel_addr, poll
 * sel_addr until a poll_mask bit is set (up to poll_wait iterations),
 * then read read_addr and store the (sel_val, data) pair.  sel_val
 * advances by sel_val_stride each iteration.  Returns bytes captured,
 * or 0 on poll timeout (caller flags the entry as a size mismatch).
 */
static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
			      struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	struct __pollrd *pollrd = &entry->region.pollrd;
	u32 data, wait_count, poll_wait, sel_val;
	int i;

	poll_wait = pollrd->poll_wait;
	sel_val = pollrd->sel_val;

	for (i = 0; i < pollrd->no_ops; i++) {
		qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
		wait_count = 0;
		while (wait_count < poll_wait) {
			data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
			if ((data & pollrd->poll_mask) != 0)
				break;
			wait_count++;
		}

		if (wait_count == poll_wait) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return 0;
		}

		data = qlcnic_ind_rd(adapter, pollrd->read_addr);
		*buffer++ = cpu_to_le32(sel_val);
		*buffer++ = cpu_to_le32(data);
		sel_val += pollrd->sel_val_stride;
	}
	return pollrd->no_ops * (2 * sizeof(u32));
}
  555. static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
  556. struct qlcnic_dump_entry *entry, __le32 *buffer)
  557. {
  558. struct __mux2 *mux2 = &entry->region.mux2;
  559. u32 data;
  560. u32 t_sel_val, sel_val1, sel_val2;
  561. int i;
  562. sel_val1 = mux2->sel_val1;
  563. sel_val2 = mux2->sel_val2;
  564. for (i = 0; i < mux2->no_ops; i++) {
  565. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
  566. t_sel_val = sel_val1 & mux2->sel_val_mask;
  567. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  568. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  569. *buffer++ = cpu_to_le32(t_sel_val);
  570. *buffer++ = cpu_to_le32(data);
  571. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
  572. t_sel_val = sel_val2 & mux2->sel_val_mask;
  573. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  574. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  575. *buffer++ = cpu_to_le32(t_sel_val);
  576. *buffer++ = cpu_to_le32(data);
  577. sel_val1 += mux2->sel_val_stride;
  578. sel_val2 += mux2->sel_val_stride;
  579. }
  580. return mux2->no_ops * (4 * sizeof(u32));
  581. }
  582. static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
  583. struct qlcnic_dump_entry *entry, __le32 *buffer)
  584. {
  585. u32 fl_addr, size;
  586. struct __mem *rom = &entry->region.mem;
  587. fl_addr = rom->addr;
  588. size = rom->size / 4;
  589. if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
  590. (u8 *)buffer, size))
  591. return rom->size;
  592. return 0;
  593. }
/* Opcode dispatch table for 82xx adapters. */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
/* Opcode dispatch table for 83xx adapters: adds the poll-read, mux2 and
 * poll-rd-mod-wr opcodes, and uses the lockless flash reader for ROM. */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
  641. static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
  642. {
  643. uint64_t sum = 0;
  644. int count = temp_size / sizeof(uint32_t);
  645. while (count-- > 0)
  646. sum += *temp_buffer++;
  647. while (sum >> 32)
  648. sum = (sum & 0xFFFFFFFF) + (sum >> 32);
  649. return ~sum;
  650. }
  651. static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
  652. u8 *buffer, u32 size)
  653. {
  654. int ret = 0;
  655. if (qlcnic_82xx_check(adapter))
  656. return -EIO;
  657. if (qlcnic_83xx_lock_flash(adapter))
  658. return -EIO;
  659. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  660. QLC_83XX_MINIDUMP_FLASH,
  661. buffer, size / sizeof(u32));
  662. qlcnic_83xx_unlock_flash(adapter);
  663. return ret;
  664. }
  665. static int
  666. qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  667. struct qlcnic_cmd_args *cmd)
  668. {
  669. struct qlcnic_dump_template_hdr tmp_hdr;
  670. u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
  671. int ret = 0;
  672. if (qlcnic_82xx_check(adapter))
  673. return -EIO;
  674. if (qlcnic_83xx_lock_flash(adapter))
  675. return -EIO;
  676. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  677. QLC_83XX_MINIDUMP_FLASH,
  678. (u8 *)&tmp_hdr, size);
  679. qlcnic_83xx_unlock_flash(adapter);
  680. cmd->rsp.arg[2] = tmp_hdr.size;
  681. cmd->rsp.arg[3] = tmp_hdr.version;
  682. return ret;
  683. }
  684. static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  685. u32 *version, u32 *temp_size,
  686. u8 *use_flash_temp)
  687. {
  688. int err = 0;
  689. struct qlcnic_cmd_args cmd;
  690. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
  691. return -ENOMEM;
  692. err = qlcnic_issue_cmd(adapter, &cmd);
  693. if (err != QLCNIC_RCODE_SUCCESS) {
  694. if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
  695. qlcnic_free_mbx_args(&cmd);
  696. return -EIO;
  697. }
  698. *use_flash_temp = 1;
  699. }
  700. *temp_size = cmd.rsp.arg[2];
  701. *version = cmd.rsp.arg[3];
  702. qlcnic_free_mbx_args(&cmd);
  703. if (!(*temp_size))
  704. return -EIO;
  705. return 0;
  706. }
/*
 * __qlcnic_fw_cmd_get_minidump_temp - fetch the template via mailbox DMA.
 *
 * Allocates a coherent DMA buffer, asks the firmware to deposit the
 * template into it, then copies it into @buffer converting each word
 * from little-endian to CPU order.  Returns 0 on success, -ENOMEM on
 * allocation failure, or the firmware's error code.
 */
static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
					     u32 *buffer, u32 temp_size)
{
	int err = 0, i;
	void *tmp_addr;
	__le32 *tmp_buf;
	struct qlcnic_cmd_args cmd;
	dma_addr_t tmp_addr_t = 0;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
				      &tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr) {
		dev_err(&adapter->pdev->dev,
			"Can't get memory for FW dump template\n");
		return -ENOMEM;
	}

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
		err = -ENOMEM;
		goto free_mem;
	}

	/* pass the DMA address and size of the destination buffer */
	cmd.req.arg[1] = LSD(tmp_addr_t);
	cmd.req.arg[2] = MSD(tmp_addr_t);
	cmd.req.arg[3] = temp_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	tmp_buf = tmp_addr;
	if (err == QLCNIC_RCODE_SUCCESS) {
		/* copy out word-by-word, converting to CPU byte order */
		for (i = 0; i < temp_size / sizeof(u32); i++)
			*buffer++ = __le32_to_cpu(*tmp_buf++);
	}

	qlcnic_free_mbx_args(&cmd);

free_mem:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);

	return err;
}
  740. int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
  741. {
  742. int err;
  743. u32 temp_size = 0;
  744. u32 version, csum, *tmp_buf;
  745. struct qlcnic_hardware_context *ahw;
  746. struct qlcnic_dump_template_hdr *tmpl_hdr;
  747. u8 use_flash_temp = 0;
  748. ahw = adapter->ahw;
  749. err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
  750. &use_flash_temp);
  751. if (err) {
  752. dev_err(&adapter->pdev->dev,
  753. "Can't get template size %d\n", err);
  754. return -EIO;
  755. }
  756. ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
  757. if (!ahw->fw_dump.tmpl_hdr)
  758. return -ENOMEM;
  759. tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
  760. if (use_flash_temp)
  761. goto flash_temp;
  762. err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
  763. if (err) {
  764. flash_temp:
  765. err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
  766. temp_size);
  767. if (err) {
  768. dev_err(&adapter->pdev->dev,
  769. "Failed to get minidump template header %d\n",
  770. err);
  771. vfree(ahw->fw_dump.tmpl_hdr);
  772. ahw->fw_dump.tmpl_hdr = NULL;
  773. return -EIO;
  774. }
  775. }
  776. csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
  777. if (csum) {
  778. dev_err(&adapter->pdev->dev,
  779. "Template header checksum validation failed\n");
  780. vfree(ahw->fw_dump.tmpl_hdr);
  781. ahw->fw_dump.tmpl_hdr = NULL;
  782. return -EIO;
  783. }
  784. tmpl_hdr = ahw->fw_dump.tmpl_hdr;
  785. tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
  786. ahw->fw_dump.enable = 1;
  787. return 0;
  788. }
/*
 * qlcnic_dump_fw - capture a firmware minidump.
 *
 * Walks every entry of the previously-loaded dump template, dispatching
 * each enabled entry to its opcode handler and accumulating the results
 * in a freshly allocated dump buffer.  On success the dump is retained
 * (fw_dump->clr set) and a udev FW_DUMP event is emitted; a new dump
 * cannot be taken until the previous one is cleared.
 *
 * Returns 0 on success, -EIO when dumping is disabled / pending /
 * empty, -ENOMEM on allocation failure, -EINVAL on a malformed
 * template or size mismatch.
 *
 * NOTE(review): on the error path fw_dump->data is freed but not set
 * to NULL — confirm no later reader can see the stale pointer.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	__le32 *buffer;
	u32 ocm_window;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
	static const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_hardware_context *ahw;

	ahw = adapter->ahw;

	if (!fw_dump->enable) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	entry_offset = tmpl_hdr->offset;
	/* record driver and firmware versions in the captured header */
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		/* seed saved_state[] with this function's OCM window and
		 * PCI function number for the template's ctrl entries */
		ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
		tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				 "Invalid entry type %d, exiting dump\n",
				 entry->hdr.type);
			goto error;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;

		/* advance by cap_size regardless, keeping the buffer layout
		 * in sync with the template's expectations */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}

	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			 "Captured(%d) and expected size(%d) do not match\n",
			 buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			 adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
			 adapter->netdev->name, fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}

error:
	vfree(fw_dump->data);
	return -EINVAL;
}
  885. void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
  886. {
  887. u32 prev_version, current_version;
  888. struct qlcnic_hardware_context *ahw = adapter->ahw;
  889. struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
  890. struct pci_dev *pdev = adapter->pdev;
  891. prev_version = adapter->fw_version;
  892. current_version = qlcnic_83xx_get_fw_version(adapter);
  893. if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
  894. if (fw_dump->tmpl_hdr)
  895. vfree(fw_dump->tmpl_hdr);
  896. if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
  897. dev_info(&pdev->dev, "Supports FW dump capability\n");
  898. }
  899. }