/* qlcnic_minidump.c - QLogic qlcnic NIC driver: firmware minidump support */
  1. #include "qlcnic.h"
  2. #include "qlcnic_hdr.h"
  3. #include "qlcnic_83xx_hw.h"
  4. #include "qlcnic_hw.h"
  5. #include <net/ip.h>
  6. #define QLC_83XX_MINIDUMP_FLASH 0x520000
  7. #define QLC_83XX_OCM_INDEX 3
  8. #define QLC_83XX_PCI_INDEX 0
  9. static const u32 qlcnic_ms_read_data[] = {
  10. 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
  11. };
  12. #define QLCNIC_DUMP_WCRB BIT_0
  13. #define QLCNIC_DUMP_RWCRB BIT_1
  14. #define QLCNIC_DUMP_ANDCRB BIT_2
  15. #define QLCNIC_DUMP_ORCRB BIT_3
  16. #define QLCNIC_DUMP_POLLCRB BIT_4
  17. #define QLCNIC_DUMP_RD_SAVE BIT_5
  18. #define QLCNIC_DUMP_WRT_SAVED BIT_6
  19. #define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
  20. #define QLCNIC_DUMP_SKIP BIT_7
  21. #define QLCNIC_DUMP_MASK_MAX 0xff
/* Common header preceding every minidump template entry. */
struct qlcnic_common_entry_hdr {
	u32 type;	/* entry opcode (enum qlcnic_minidump_opcode) */
	u32 offset;	/* byte offset from this entry to the next one */
	u32 cap_size;	/* expected capture size in bytes for this entry */
	u8 mask;	/* capture-mask bits; entry runs only if it intersects drv_cap_mask */
	u8 rsvd[2];
	u8 flags;	/* QLCNIC_DUMP_SKIP is set here when the entry is skipped */
} __packed;
/* CRB register-range read descriptor (QLCNIC_DUMP_READ_CRB). */
struct __crb {
	u32 addr;	/* first CRB address to read */
	u8 stride;	/* address increment between reads */
	u8 rsvd1[3];
	u32 data_size;
	u32 no_ops;	/* number of reads */
	u32 rsvd2[4];
} __packed;
/* Control-operation descriptor (QLCNIC_DUMP_READ_CTRL / QLCNIC_DUMP_PEG_REG).
 * opcode is a bitmask of QLCNIC_DUMP_* operation bits applied at each
 * address; index_a/index_v select slots in the template's saved_state[].
 */
struct __ctrl {
	u32 addr;	/* starting CRB address */
	u8 stride;	/* address increment per operation */
	u8 index_a;	/* saved_state slot supplying an address (if nonzero) */
	u16 timeout;	/* poll budget for QLCNIC_DUMP_POLLCRB */
	u32 data_size;
	u32 no_ops;	/* number of addresses to process */
	u8 opcode;	/* bitmask of QLCNIC_DUMP_* operation bits */
	u8 index_v;	/* saved_state slot for values */
	u8 shl_val;	/* left shift used by QLCNIC_DUMP_MOD_SAVE_ST */
	u8 shr_val;	/* right shift used by QLCNIC_DUMP_MOD_SAVE_ST */
	u32 val1;
	u32 val2;
	u32 val3;
} __packed;
/* L1/L2 cache dump descriptor. */
struct __cache {
	u32 addr;		/* tag-select register */
	u16 stride;		/* tag value increment per operation */
	u16 init_tag_val;	/* first tag value */
	u32 size;
	u32 no_ops;		/* number of tags to walk */
	u32 ctrl_addr;		/* cache control register */
	u32 ctrl_val;		/* low word: control write value; for L2 the
				 * high word packs poll mask (LSB) and poll
				 * timeout (MSB)
				 */
	u32 read_addr;		/* first data register */
	u8 read_addr_stride;	/* increment between data registers */
	u8 read_addr_num;	/* data registers read per tag */
	u8 rsvd1[2];
} __packed;
/* On-chip memory dump descriptor (read via BAR0 MMIO). */
struct __ocm {
	u8 rsvd[8];
	u32 size;
	u32 no_ops;		/* number of 32-bit reads */
	u8 rsvd1[8];
	u32 read_addr;		/* offset from ahw->pci_base0 */
	u32 read_addr_stride;	/* byte increment between reads */
} __packed;
/* Plain memory/ROM region descriptor. */
struct __mem {
	u8 rsvd[24];
	u32 addr;	/* region start address */
	u32 size;	/* region length in bytes */
} __packed;
/* Mux dump descriptor: write a select value, read a data register. */
struct __mux {
	u32 addr;	/* select register */
	u8 rsvd[4];
	u32 size;
	u32 no_ops;	/* number of select/read pairs */
	u32 val;	/* initial select value */
	u32 val_stride;	/* select value increment per op */
	u32 read_addr;	/* data register */
	u8 rsvd2[4];
} __packed;
/* Per-queue register dump descriptor (QLCNIC_DUMP_QUEUE). */
struct __queue {
	u32 sel_addr;		/* queue-select register */
	u16 stride;		/* queue id increment per op */
	u8 rsvd[2];
	u32 size;
	u32 no_ops;		/* number of queues */
	u8 rsvd2[8];
	u32 read_addr;		/* first data register */
	u8 read_addr_stride;	/* increment between data registers */
	u8 read_addr_cnt;	/* data registers read per queue */
	u8 rsvd3[2];
} __packed;
/* Select, poll for readiness, then read (QLCNIC_DUMP_POLL_RD). */
struct __pollrd {
	u32 sel_addr;		/* select register (also the one polled) */
	u32 read_addr;		/* data register */
	u32 sel_val;		/* initial select value */
	u16 sel_val_stride;	/* select value increment per op */
	u16 no_ops;		/* number of operations */
	u32 poll_wait;		/* poll iteration budget */
	u32 poll_mask;		/* readiness bits in sel_addr */
	u32 data_size;
	u8 rsvd[4];
} __packed;
/* Two-level mux descriptor (QLCNIC_READ_MUX2): per op, each of the two
 * select values is written to sel_addr1, its masked form to sel_addr2,
 * and read_addr is then read.
 */
struct __mux2 {
	u32 sel_addr1;
	u32 sel_addr2;
	u32 sel_val1;
	u32 sel_val2;
	u32 no_ops;
	u32 sel_val_mask;	/* mask applied to the select value for sel_addr2 */
	u32 read_addr;
	u8 sel_val_stride;	/* increment applied to both select values */
	u8 data_size;
	u8 rsvd[2];
} __packed;
/* Poll / read-modify-write descriptor (QLCNIC_READ_POLLRDMWR). */
struct __pollrdmwr {
	u32 addr1;	/* control register: written then polled */
	u32 addr2;	/* data register: read, masked, written back */
	u32 val1;	/* first value written to addr1 */
	u32 val2;	/* second value written to addr1 */
	u32 poll_wait;	/* poll iteration budget */
	u32 poll_mask;	/* readiness bits in addr1 */
	u32 mod_mask;	/* mask applied to the addr2 readback */
	u32 data_size;
} __packed;
/* One template entry: common header plus the opcode-specific region
 * descriptor selected by hdr.type.
 */
struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb crb;
		struct __cache cache;
		struct __ocm ocm;
		struct __mem mem;
		struct __mux mux;
		struct __queue que;
		struct __ctrl ctrl;
		struct __pollrdmwr pollrdmwr;
		struct __mux2 mux2;
		struct __pollrd pollrd;
	} region;
} __packed;
/* Opcodes that may appear in a minidump template entry header; each is
 * dispatched through the qlcnic_fw_dump_ops tables below.
 */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP = 0,
	QLCNIC_DUMP_READ_CRB = 1,
	QLCNIC_DUMP_READ_MUX = 2,
	QLCNIC_DUMP_QUEUE = 3,
	QLCNIC_DUMP_BRD_CONFIG = 4,
	QLCNIC_DUMP_READ_OCM = 6,
	QLCNIC_DUMP_PEG_REG = 7,
	QLCNIC_DUMP_L1_DTAG = 8,
	QLCNIC_DUMP_L1_ITAG = 9,
	QLCNIC_DUMP_L1_DATA = 11,
	QLCNIC_DUMP_L1_INST = 12,
	QLCNIC_DUMP_L2_DTAG = 21,
	QLCNIC_DUMP_L2_ITAG = 22,
	QLCNIC_DUMP_L2_DATA = 23,
	QLCNIC_DUMP_L2_INST = 24,
	QLCNIC_DUMP_POLL_RD = 35,
	QLCNIC_READ_MUX2 = 36,
	QLCNIC_READ_POLLRDMWR = 37,
	QLCNIC_DUMP_READ_ROM = 71,
	QLCNIC_DUMP_READ_MEM = 72,
	QLCNIC_DUMP_READ_CTRL = 98,
	QLCNIC_DUMP_TLHDR = 99,
	QLCNIC_DUMP_RDEND = 255
};
/* Maps a template opcode to the handler that captures that entry.
 * Handlers return the number of bytes written into the dump buffer;
 * a mismatch with the entry's cap_size marks the entry invalid.
 */
struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};
  179. static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
  180. struct qlcnic_dump_entry *entry, __le32 *buffer)
  181. {
  182. int i;
  183. u32 addr, data;
  184. struct __crb *crb = &entry->region.crb;
  185. addr = crb->addr;
  186. for (i = 0; i < crb->no_ops; i++) {
  187. data = qlcnic_ind_rd(adapter, addr);
  188. *buffer++ = cpu_to_le32(addr);
  189. *buffer++ = cpu_to_le32(data);
  190. addr += crb->stride;
  191. }
  192. return crb->no_ops * 2 * sizeof(u32);
  193. }
  194. static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
  195. struct qlcnic_dump_entry *entry, __le32 *buffer)
  196. {
  197. int i, k, timeout = 0;
  198. u32 addr, data;
  199. u8 no_ops;
  200. struct __ctrl *ctr = &entry->region.ctrl;
  201. struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
  202. addr = ctr->addr;
  203. no_ops = ctr->no_ops;
  204. for (i = 0; i < no_ops; i++) {
  205. k = 0;
  206. for (k = 0; k < 8; k++) {
  207. if (!(ctr->opcode & (1 << k)))
  208. continue;
  209. switch (1 << k) {
  210. case QLCNIC_DUMP_WCRB:
  211. qlcnic_ind_wr(adapter, addr, ctr->val1);
  212. break;
  213. case QLCNIC_DUMP_RWCRB:
  214. data = qlcnic_ind_rd(adapter, addr);
  215. qlcnic_ind_wr(adapter, addr, data);
  216. break;
  217. case QLCNIC_DUMP_ANDCRB:
  218. data = qlcnic_ind_rd(adapter, addr);
  219. qlcnic_ind_wr(adapter, addr,
  220. (data & ctr->val2));
  221. break;
  222. case QLCNIC_DUMP_ORCRB:
  223. data = qlcnic_ind_rd(adapter, addr);
  224. qlcnic_ind_wr(adapter, addr,
  225. (data | ctr->val3));
  226. break;
  227. case QLCNIC_DUMP_POLLCRB:
  228. while (timeout <= ctr->timeout) {
  229. data = qlcnic_ind_rd(adapter, addr);
  230. if ((data & ctr->val2) == ctr->val1)
  231. break;
  232. usleep_range(1000, 2000);
  233. timeout++;
  234. }
  235. if (timeout > ctr->timeout) {
  236. dev_info(&adapter->pdev->dev,
  237. "Timed out, aborting poll CRB\n");
  238. return -EINVAL;
  239. }
  240. break;
  241. case QLCNIC_DUMP_RD_SAVE:
  242. if (ctr->index_a)
  243. addr = t_hdr->saved_state[ctr->index_a];
  244. data = qlcnic_ind_rd(adapter, addr);
  245. t_hdr->saved_state[ctr->index_v] = data;
  246. break;
  247. case QLCNIC_DUMP_WRT_SAVED:
  248. if (ctr->index_v)
  249. data = t_hdr->saved_state[ctr->index_v];
  250. else
  251. data = ctr->val1;
  252. if (ctr->index_a)
  253. addr = t_hdr->saved_state[ctr->index_a];
  254. qlcnic_ind_wr(adapter, addr, data);
  255. break;
  256. case QLCNIC_DUMP_MOD_SAVE_ST:
  257. data = t_hdr->saved_state[ctr->index_v];
  258. data <<= ctr->shl_val;
  259. data >>= ctr->shr_val;
  260. if (ctr->val2)
  261. data &= ctr->val2;
  262. data |= ctr->val3;
  263. data += ctr->val1;
  264. t_hdr->saved_state[ctr->index_v] = data;
  265. break;
  266. default:
  267. dev_info(&adapter->pdev->dev,
  268. "Unknown opcode\n");
  269. break;
  270. }
  271. }
  272. addr += ctr->stride;
  273. }
  274. return 0;
  275. }
  276. static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
  277. struct qlcnic_dump_entry *entry, __le32 *buffer)
  278. {
  279. int loop;
  280. u32 val, data = 0;
  281. struct __mux *mux = &entry->region.mux;
  282. val = mux->val;
  283. for (loop = 0; loop < mux->no_ops; loop++) {
  284. qlcnic_ind_wr(adapter, mux->addr, val);
  285. data = qlcnic_ind_rd(adapter, mux->read_addr);
  286. *buffer++ = cpu_to_le32(val);
  287. *buffer++ = cpu_to_le32(data);
  288. val += mux->val_stride;
  289. }
  290. return 2 * mux->no_ops * sizeof(u32);
  291. }
  292. static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
  293. struct qlcnic_dump_entry *entry, __le32 *buffer)
  294. {
  295. int i, loop;
  296. u32 cnt, addr, data, que_id = 0;
  297. struct __queue *que = &entry->region.que;
  298. addr = que->read_addr;
  299. cnt = que->read_addr_cnt;
  300. for (loop = 0; loop < que->no_ops; loop++) {
  301. qlcnic_ind_wr(adapter, que->sel_addr, que_id);
  302. addr = que->read_addr;
  303. for (i = 0; i < cnt; i++) {
  304. data = qlcnic_ind_rd(adapter, addr);
  305. *buffer++ = cpu_to_le32(data);
  306. addr += que->read_addr_stride;
  307. }
  308. que_id += que->stride;
  309. }
  310. return que->no_ops * cnt * sizeof(u32);
  311. }
  312. static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
  313. struct qlcnic_dump_entry *entry, __le32 *buffer)
  314. {
  315. int i;
  316. u32 data;
  317. void __iomem *addr;
  318. struct __ocm *ocm = &entry->region.ocm;
  319. addr = adapter->ahw->pci_base0 + ocm->read_addr;
  320. for (i = 0; i < ocm->no_ops; i++) {
  321. data = readl(addr);
  322. *buffer++ = cpu_to_le32(data);
  323. addr += ocm->read_addr_stride;
  324. }
  325. return ocm->no_ops * sizeof(u32);
  326. }
/* Dump a flash/ROM region on 82xx parts.  Tries to take the flash
 * hardware lock (up to MAX_CTL_CHECK attempts), claims ownership, then
 * reads the region one word at a time through the ROM window registers
 * and releases the lock.
 *
 * NOTE(review): if the lock is never granted the code still proceeds
 * with the read -- confirm this best-effort behavior is intended.
 */
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;

	fl_addr = rom->addr;
	size = rom->size / 4;	/* word count */
lock_try:
	lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		usleep_range(10000, 11000);
		count++;
		goto lock_try;
	}
	QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
			    adapter->ahw->pci_func);
	for (i = 0; i < size; i++) {
		/* Select the 64K flash window, then read within it. */
		addr = fl_addr & 0xFFFF0000;
		qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		val = qlcnic_ind_rd(adapter, addr);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
	return rom->size;
}
  355. static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  356. struct qlcnic_dump_entry *entry, __le32 *buffer)
  357. {
  358. int i;
  359. u32 cnt, val, data, addr;
  360. struct __cache *l1 = &entry->region.cache;
  361. val = l1->init_tag_val;
  362. for (i = 0; i < l1->no_ops; i++) {
  363. qlcnic_ind_wr(adapter, l1->addr, val);
  364. qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
  365. addr = l1->read_addr;
  366. cnt = l1->read_addr_num;
  367. while (cnt) {
  368. data = qlcnic_ind_rd(adapter, addr);
  369. *buffer++ = cpu_to_le32(data);
  370. addr += l1->read_addr_stride;
  371. cnt--;
  372. }
  373. val += l1->stride;
  374. }
  375. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  376. }
  377. static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
  378. struct qlcnic_dump_entry *entry, __le32 *buffer)
  379. {
  380. int i;
  381. u32 cnt, val, data, addr;
  382. u8 poll_mask, poll_to, time_out = 0;
  383. struct __cache *l2 = &entry->region.cache;
  384. val = l2->init_tag_val;
  385. poll_mask = LSB(MSW(l2->ctrl_val));
  386. poll_to = MSB(MSW(l2->ctrl_val));
  387. for (i = 0; i < l2->no_ops; i++) {
  388. qlcnic_ind_wr(adapter, l2->addr, val);
  389. if (LSW(l2->ctrl_val))
  390. qlcnic_ind_wr(adapter, l2->ctrl_addr,
  391. LSW(l2->ctrl_val));
  392. if (!poll_mask)
  393. goto skip_poll;
  394. do {
  395. data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
  396. if (!(data & poll_mask))
  397. break;
  398. usleep_range(1000, 2000);
  399. time_out++;
  400. } while (time_out <= poll_to);
  401. if (time_out > poll_to) {
  402. dev_err(&adapter->pdev->dev,
  403. "Timeout exceeded in %s, aborting dump\n",
  404. __func__);
  405. return -EINVAL;
  406. }
  407. skip_poll:
  408. addr = l2->read_addr;
  409. cnt = l2->read_addr_num;
  410. while (cnt) {
  411. data = qlcnic_ind_rd(adapter, addr);
  412. *buffer++ = cpu_to_le32(data);
  413. addr += l2->read_addr_stride;
  414. cnt--;
  415. }
  416. val += l2->stride;
  417. }
  418. return l2->no_ops * l2->read_addr_num * sizeof(u32);
  419. }
  420. static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
  421. struct qlcnic_dump_entry *entry, __le32 *buffer)
  422. {
  423. u32 addr, data, test, ret = 0;
  424. int i, reg_read;
  425. struct __mem *mem = &entry->region.mem;
  426. reg_read = mem->size;
  427. addr = mem->addr;
  428. /* check for data size of multiple of 16 and 16 byte alignment */
  429. if ((addr & 0xf) || (reg_read%16)) {
  430. dev_info(&adapter->pdev->dev,
  431. "Unaligned memory addr:0x%x size:0x%x\n",
  432. addr, reg_read);
  433. return -EINVAL;
  434. }
  435. mutex_lock(&adapter->ahw->mem_lock);
  436. while (reg_read != 0) {
  437. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
  438. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
  439. qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
  440. for (i = 0; i < MAX_CTL_CHECK; i++) {
  441. test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
  442. if (!(test & TA_CTL_BUSY))
  443. break;
  444. }
  445. if (i == MAX_CTL_CHECK) {
  446. if (printk_ratelimit()) {
  447. dev_err(&adapter->pdev->dev,
  448. "failed to read through agent\n");
  449. ret = -EINVAL;
  450. goto out;
  451. }
  452. }
  453. for (i = 0; i < 4; i++) {
  454. data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
  455. *buffer++ = cpu_to_le32(data);
  456. }
  457. addr += 16;
  458. reg_read -= 16;
  459. ret += 16;
  460. }
  461. out:
  462. mutex_unlock(&adapter->ahw->mem_lock);
  463. return mem->size;
  464. }
  465. static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
  466. struct qlcnic_dump_entry *entry, __le32 *buffer)
  467. {
  468. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  469. return 0;
  470. }
  471. static int qlcnic_valid_dump_entry(struct device *dev,
  472. struct qlcnic_dump_entry *entry, u32 size)
  473. {
  474. int ret = 1;
  475. if (size != entry->hdr.cap_size) {
  476. dev_err(dev,
  477. "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  478. entry->hdr.type, entry->hdr.mask, size,
  479. entry->hdr.cap_size);
  480. ret = 0;
  481. }
  482. return ret;
  483. }
/* Poll/read-modify-write entry: write val1 to addr1 and poll addr1 for
 * the mask bits; read addr2, mask it with mod_mask and write it back;
 * write val2 to addr1 and poll again; finally record (addr2, masked
 * data).  Returns 0 on the first poll timing out, which makes the
 * caller flag the entry invalid.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
				 struct qlcnic_dump_entry *entry,
				 __le32 *buffer)
{
	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
	u32 data, wait_count, poll_wait, temp;

	poll_wait = poll->poll_wait;
	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
	wait_count = 0;
	while (wait_count < poll_wait) {
		data = qlcnic_ind_rd(adapter, poll->addr1);
		if ((data & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	if (wait_count == poll_wait) {
		dev_err(&adapter->pdev->dev,
			"Timeout exceeded in %s, aborting dump\n",
			__func__);
		return 0;
	}

	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
	qlcnic_ind_wr(adapter, poll->addr2, data);
	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
	wait_count = 0;
	/* NOTE(review): unlike the first loop, a timeout here is not
	 * treated as fatal -- confirm that asymmetry is intentional.
	 */
	while (wait_count < poll_wait) {
		temp = qlcnic_ind_rd(adapter, poll->addr1);
		if ((temp & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	*buffer++ = cpu_to_le32(poll->addr2);
	*buffer++ = cpu_to_le32(data);
	return 2 * sizeof(u32);
}
  519. static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
  520. struct qlcnic_dump_entry *entry, __le32 *buffer)
  521. {
  522. struct __pollrd *pollrd = &entry->region.pollrd;
  523. u32 data, wait_count, poll_wait, sel_val;
  524. int i;
  525. poll_wait = pollrd->poll_wait;
  526. sel_val = pollrd->sel_val;
  527. for (i = 0; i < pollrd->no_ops; i++) {
  528. qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
  529. wait_count = 0;
  530. while (wait_count < poll_wait) {
  531. data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
  532. if ((data & pollrd->poll_mask) != 0)
  533. break;
  534. wait_count++;
  535. }
  536. if (wait_count == poll_wait) {
  537. dev_err(&adapter->pdev->dev,
  538. "Timeout exceeded in %s, aborting dump\n",
  539. __func__);
  540. return 0;
  541. }
  542. data = qlcnic_ind_rd(adapter, pollrd->read_addr);
  543. *buffer++ = cpu_to_le32(sel_val);
  544. *buffer++ = cpu_to_le32(data);
  545. sel_val += pollrd->sel_val_stride;
  546. }
  547. return pollrd->no_ops * (2 * sizeof(u32));
  548. }
  549. static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
  550. struct qlcnic_dump_entry *entry, __le32 *buffer)
  551. {
  552. struct __mux2 *mux2 = &entry->region.mux2;
  553. u32 data;
  554. u32 t_sel_val, sel_val1, sel_val2;
  555. int i;
  556. sel_val1 = mux2->sel_val1;
  557. sel_val2 = mux2->sel_val2;
  558. for (i = 0; i < mux2->no_ops; i++) {
  559. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
  560. t_sel_val = sel_val1 & mux2->sel_val_mask;
  561. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  562. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  563. *buffer++ = cpu_to_le32(t_sel_val);
  564. *buffer++ = cpu_to_le32(data);
  565. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
  566. t_sel_val = sel_val2 & mux2->sel_val_mask;
  567. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  568. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  569. *buffer++ = cpu_to_le32(t_sel_val);
  570. *buffer++ = cpu_to_le32(data);
  571. sel_val1 += mux2->sel_val_stride;
  572. sel_val2 += mux2->sel_val_stride;
  573. }
  574. return mux2->no_ops * (4 * sizeof(u32));
  575. }
  576. static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
  577. struct qlcnic_dump_entry *entry, __le32 *buffer)
  578. {
  579. u32 fl_addr, size;
  580. struct __mem *rom = &entry->region.mem;
  581. fl_addr = rom->addr;
  582. size = rom->size / 4;
  583. if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
  584. (u8 *)buffer, size))
  585. return rom->size;
  586. return 0;
  587. }
/* Opcode -> handler dispatch table for 82xx adapters. */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
/* Opcode -> handler dispatch table for 83xx adapters; adds the pollrd,
 * mux2 and pollrdmwr opcodes and uses the lockless ROM reader.
 */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
  635. static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
  636. {
  637. uint64_t sum = 0;
  638. int count = temp_size / sizeof(uint32_t);
  639. while (count-- > 0)
  640. sum += *temp_buffer++;
  641. while (sum >> 32)
  642. sum = (sum & 0xFFFFFFFF) + (sum >> 32);
  643. return ~sum;
  644. }
  645. static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
  646. u8 *buffer, u32 size)
  647. {
  648. int ret = 0;
  649. if (qlcnic_82xx_check(adapter))
  650. return -EIO;
  651. if (qlcnic_83xx_lock_flash(adapter))
  652. return -EIO;
  653. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  654. QLC_83XX_MINIDUMP_FLASH,
  655. buffer, size / sizeof(u32));
  656. qlcnic_83xx_unlock_flash(adapter);
  657. return ret;
  658. }
  659. static int
  660. qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  661. struct qlcnic_cmd_args *cmd)
  662. {
  663. struct qlcnic_dump_template_hdr tmp_hdr;
  664. u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
  665. int ret = 0;
  666. if (qlcnic_82xx_check(adapter))
  667. return -EIO;
  668. if (qlcnic_83xx_lock_flash(adapter))
  669. return -EIO;
  670. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  671. QLC_83XX_MINIDUMP_FLASH,
  672. (u8 *)&tmp_hdr, size);
  673. qlcnic_83xx_unlock_flash(adapter);
  674. cmd->rsp.arg[2] = tmp_hdr.size;
  675. cmd->rsp.arg[3] = tmp_hdr.version;
  676. return ret;
  677. }
  678. static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  679. u32 *version, u32 *temp_size,
  680. u8 *use_flash_temp)
  681. {
  682. int err = 0;
  683. struct qlcnic_cmd_args cmd;
  684. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
  685. return -ENOMEM;
  686. err = qlcnic_issue_cmd(adapter, &cmd);
  687. if (err != QLCNIC_RCODE_SUCCESS) {
  688. if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
  689. qlcnic_free_mbx_args(&cmd);
  690. return -EIO;
  691. }
  692. *use_flash_temp = 1;
  693. }
  694. *temp_size = cmd.rsp.arg[2];
  695. *version = cmd.rsp.arg[3];
  696. qlcnic_free_mbx_args(&cmd);
  697. if (!(*temp_size))
  698. return -EIO;
  699. return 0;
  700. }
  701. static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
  702. u32 *buffer, u32 temp_size)
  703. {
  704. int err = 0, i;
  705. void *tmp_addr;
  706. __le32 *tmp_buf;
  707. struct qlcnic_cmd_args cmd;
  708. dma_addr_t tmp_addr_t = 0;
  709. tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
  710. &tmp_addr_t, GFP_KERNEL);
  711. if (!tmp_addr) {
  712. dev_err(&adapter->pdev->dev,
  713. "Can't get memory for FW dump template\n");
  714. return -ENOMEM;
  715. }
  716. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
  717. err = -ENOMEM;
  718. goto free_mem;
  719. }
  720. cmd.req.arg[1] = LSD(tmp_addr_t);
  721. cmd.req.arg[2] = MSD(tmp_addr_t);
  722. cmd.req.arg[3] = temp_size;
  723. err = qlcnic_issue_cmd(adapter, &cmd);
  724. tmp_buf = tmp_addr;
  725. if (err == QLCNIC_RCODE_SUCCESS) {
  726. for (i = 0; i < temp_size / sizeof(u32); i++)
  727. *buffer++ = __le32_to_cpu(*tmp_buf++);
  728. }
  729. qlcnic_free_mbx_args(&cmd);
  730. free_mem:
  731. dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
  732. return err;
  733. }
/* Obtain and validate the minidump template, preferring the firmware
 * mailbox copy with a fallback to flash.  On success the template is
 * cached in ahw->fw_dump.tmpl_hdr, the driver capture mask is set to
 * the default, and dumping is enabled.
 */
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	int err;
	u32 temp_size = 0;
	u32 version, csum, *tmp_buf;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_template_hdr *tmpl_hdr;
	u8 use_flash_temp = 0;

	ahw = adapter->ahw;

	err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
					       &use_flash_temp);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Can't get template size %d\n", err);
		return -EIO;
	}

	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
	if (!ahw->fw_dump.tmpl_hdr)
		return -ENOMEM;

	tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
	if (use_flash_temp)
		goto flash_temp;

	err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);

	if (err) {
flash_temp:
		/* Mailbox fetch failed (or flash was requested up front);
		 * read the template straight from flash instead.
		 */
		err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
							temp_size);

		if (err) {
			dev_err(&adapter->pdev->dev,
				"Failed to get minidump template header %d\n",
				err);
			vfree(ahw->fw_dump.tmpl_hdr);
			ahw->fw_dump.tmpl_hdr = NULL;
			return -EIO;
		}
	}

	/* A valid template checksums to zero. */
	csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);

	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		vfree(ahw->fw_dump.tmpl_hdr);
		ahw->fw_dump.tmpl_hdr = NULL;
		return -EIO;
	}

	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
	tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
	ahw->fw_dump.enable = 1;

	return 0;
}
/* Capture a firmware minidump by walking the cached template: size the
 * data area from the per-mask capture sizes, allocate it, and run each
 * entry's handler in sequence.  On success fw_dump->data holds the
 * dump, fw_dump->clr is set (blocking re-capture until cleared), and a
 * udev event announces availability.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	__le32 *buffer;
	u32 ocm_window;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
	static const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_hardware_context *ahw;

	ahw = adapter->ahw;

	if (!fw_dump->enable) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	entry_offset = tmpl_hdr->offset;
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		/* 83xx control entries consult these saved-state slots. */
		ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
		tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		/* Skip entries outside the configured capture mask. */
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				 "Invalid entry type %d, exiting dump\n",
				 entry->hdr.type);
			goto error;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;

		/* Advance by the template-declared capture size, not by
		 * the handler's return value.
		 */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}

	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			 "Captured(%d) and expected size(%d) do not match\n",
			 buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			 adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
			 adapter->netdev->name, fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	vfree(fw_dump->data);
	return -EINVAL;
}
  879. void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
  880. {
  881. u32 prev_version, current_version;
  882. struct qlcnic_hardware_context *ahw = adapter->ahw;
  883. struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
  884. struct pci_dev *pdev = adapter->pdev;
  885. prev_version = adapter->fw_version;
  886. current_version = qlcnic_83xx_get_fw_version(adapter);
  887. if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
  888. if (fw_dump->tmpl_hdr)
  889. vfree(fw_dump->tmpl_hdr);
  890. if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
  891. dev_info(&pdev->dev, "Supports FW dump capability\n");
  892. }
  893. }