/* qlge_dbg.c - QLogic qlge NIC driver: register/coredump debug helpers. */
  1. #include "qlge.h"
  2. static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
  3. {
  4. int status = 0;
  5. int i;
  6. for (i = 0; i < 8; i++, buf++) {
  7. ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
  8. *buf = ql_read32(qdev, NIC_ETS);
  9. }
  10. for (i = 0; i < 2; i++, buf++) {
  11. ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
  12. *buf = ql_read32(qdev, CNA_ETS);
  13. }
  14. return status;
  15. }
  16. static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
  17. {
  18. int i;
  19. for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
  20. ql_write32(qdev, INTR_EN,
  21. qdev->intr_context[i].intr_read_mask);
  22. *buf = ql_read32(qdev, INTR_EN);
  23. }
  24. }
  25. static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
  26. {
  27. int i, status;
  28. u32 value[3];
  29. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  30. if (status)
  31. return status;
  32. for (i = 0; i < 16; i++) {
  33. status = ql_get_mac_addr_reg(qdev,
  34. MAC_ADDR_TYPE_CAM_MAC, i, value);
  35. if (status) {
  36. QPRINTK(qdev, DRV, ERR,
  37. "Failed read of mac index register.\n");
  38. goto err;
  39. }
  40. *buf++ = value[0]; /* lower MAC address */
  41. *buf++ = value[1]; /* upper MAC address */
  42. *buf++ = value[2]; /* output */
  43. }
  44. for (i = 0; i < 32; i++) {
  45. status = ql_get_mac_addr_reg(qdev,
  46. MAC_ADDR_TYPE_MULTI_MAC, i, value);
  47. if (status) {
  48. QPRINTK(qdev, DRV, ERR,
  49. "Failed read of mac index register.\n");
  50. goto err;
  51. }
  52. *buf++ = value[0]; /* lower Mcast address */
  53. *buf++ = value[1]; /* upper Mcast address */
  54. }
  55. err:
  56. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  57. return status;
  58. }
  59. static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
  60. {
  61. int status;
  62. u32 value, i;
  63. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  64. if (status)
  65. return status;
  66. for (i = 0; i < 16; i++) {
  67. status = ql_get_routing_reg(qdev, i, &value);
  68. if (status) {
  69. QPRINTK(qdev, DRV, ERR,
  70. "Failed read of routing index register.\n");
  71. goto err;
  72. } else {
  73. *buf++ = value;
  74. }
  75. }
  76. err:
  77. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  78. return status;
  79. }
  80. /* Create a coredump segment header */
  81. static void ql_build_coredump_seg_header(
  82. struct mpi_coredump_segment_header *seg_hdr,
  83. u32 seg_number, u32 seg_size, u8 *desc)
  84. {
  85. memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
  86. seg_hdr->cookie = MPI_COREDUMP_COOKIE;
  87. seg_hdr->segNum = seg_number;
  88. seg_hdr->segSize = seg_size;
  89. memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
  90. }
/* Populate a register-dump image for ethtool/debug consumption.
 *
 * Fills the global header, then builds each segment header and collects
 * its payload in a fixed order that must match struct ql_reg_dump's
 * layout. Bails out silently on the first collector failure, leaving
 * later segments unfilled.
 */
void ql_gen_reg_dump(struct ql_adapter *qdev,
			struct ql_reg_dump *mpi_coredump)
{
	int i, status;

	/* Global header: cookie, sizes, and identifying string. */
	memset(&(mpi_coredump->mpi_global_header), 0,
		sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_reg_dump);
	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));

	/* segment 16: misc NIC info (ring/interrupt counts, function). */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				MISC_NIC_INFO_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->misc_nic_info),
				"MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 16, Rev C. Step 18: first 64 NIC control registers. */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				NIC1_CONTROL_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_regs),
				"NIC Registers");
	/* Get generic reg dump */
	for (i = 0; i < 64; i++)
		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

	/* Segment 31 */
	/* Get indexed register values. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				INTR_STATES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->intr_states),
				"INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				CAM_ENTRIES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->cam_entries),
				"CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		return;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				ROUTING_WORDS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_routing_words),
				"Routing Words");
	status = ql_get_routing_entries(qdev,
				&mpi_coredump->nic_routing_words[0]);
	if (status)
		return;

	/* Segment 34 (Rev C. step 23) */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				ETS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->ets),
				"ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		return;
}
  158. #ifdef QL_REG_DUMP
  159. static void ql_dump_intr_states(struct ql_adapter *qdev)
  160. {
  161. int i;
  162. u32 value;
  163. for (i = 0; i < qdev->intr_count; i++) {
  164. ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
  165. value = ql_read32(qdev, INTR_EN);
  166. printk(KERN_ERR PFX
  167. "%s: Interrupt %d is %s.\n",
  168. qdev->ndev->name, i,
  169. (value & INTR_EN_EN ? "enabled" : "disabled"));
  170. }
  171. }
  172. void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
  173. {
  174. u32 data;
  175. if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
  176. printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
  177. return;
  178. }
  179. ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
  180. printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
  181. data);
  182. ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
  183. printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
  184. data);
  185. ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
  186. printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
  187. data);
  188. ql_read_xgmac_reg(qdev, TX_CFG, &data);
  189. printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
  190. ql_read_xgmac_reg(qdev, RX_CFG, &data);
  191. printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
  192. ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
  193. printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
  194. data);
  195. ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
  196. printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
  197. data);
  198. ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
  199. printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
  200. data);
  201. ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
  202. printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
  203. qdev->ndev->name, data);
  204. ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
  205. printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
  206. qdev->ndev->name, data);
  207. ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
  208. printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
  209. data);
  210. ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
  211. printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
  212. data);
  213. ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
  214. printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
  215. data);
  216. ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
  217. printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
  218. qdev->ndev->name, data);
  219. ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
  220. printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
  221. data);
  222. ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
  223. printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
  224. qdev->ndev->name, data);
  225. ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
  226. printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
  227. data);
  228. ql_sem_unlock(qdev, qdev->xg_sem_mask);
  229. }
/* Placeholder: ETS register dumping is not implemented; kept so
 * ql_dump_regs() can call it unconditionally.
 */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
  233. static void ql_dump_cam_entries(struct ql_adapter *qdev)
  234. {
  235. int i;
  236. u32 value[3];
  237. i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  238. if (i)
  239. return;
  240. for (i = 0; i < 4; i++) {
  241. if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
  242. printk(KERN_ERR PFX
  243. "%s: Failed read of mac index register.\n",
  244. __func__);
  245. return;
  246. } else {
  247. if (value[0])
  248. printk(KERN_ERR PFX
  249. "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
  250. qdev->ndev->name, i, value[1], value[0],
  251. value[2]);
  252. }
  253. }
  254. for (i = 0; i < 32; i++) {
  255. if (ql_get_mac_addr_reg
  256. (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
  257. printk(KERN_ERR PFX
  258. "%s: Failed read of mac index register.\n",
  259. __func__);
  260. return;
  261. } else {
  262. if (value[0])
  263. printk(KERN_ERR PFX
  264. "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
  265. qdev->ndev->name, i, value[1], value[0]);
  266. }
  267. }
  268. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  269. }
  270. void ql_dump_routing_entries(struct ql_adapter *qdev)
  271. {
  272. int i;
  273. u32 value;
  274. i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  275. if (i)
  276. return;
  277. for (i = 0; i < 16; i++) {
  278. value = 0;
  279. if (ql_get_routing_reg(qdev, i, &value)) {
  280. printk(KERN_ERR PFX
  281. "%s: Failed read of routing index register.\n",
  282. __func__);
  283. return;
  284. } else {
  285. if (value)
  286. printk(KERN_ERR PFX
  287. "%s: Routing Mask %d = 0x%.08x.\n",
  288. qdev->ndev->name, i, value);
  289. }
  290. }
  291. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  292. }
  293. void ql_dump_regs(struct ql_adapter *qdev)
  294. {
  295. printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
  296. printk(KERN_ERR PFX "SYS = 0x%x.\n",
  297. ql_read32(qdev, SYS));
  298. printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
  299. ql_read32(qdev, RST_FO));
  300. printk(KERN_ERR PFX "FSC = 0x%x.\n",
  301. ql_read32(qdev, FSC));
  302. printk(KERN_ERR PFX "CSR = 0x%x.\n",
  303. ql_read32(qdev, CSR));
  304. printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
  305. ql_read32(qdev, ICB_RID));
  306. printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
  307. ql_read32(qdev, ICB_L));
  308. printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
  309. ql_read32(qdev, ICB_H));
  310. printk(KERN_ERR PFX "CFG = 0x%x.\n",
  311. ql_read32(qdev, CFG));
  312. printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
  313. ql_read32(qdev, BIOS_ADDR));
  314. printk(KERN_ERR PFX "STS = 0x%x.\n",
  315. ql_read32(qdev, STS));
  316. printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
  317. ql_read32(qdev, INTR_EN));
  318. printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
  319. ql_read32(qdev, INTR_MASK));
  320. printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
  321. ql_read32(qdev, ISR1));
  322. printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
  323. ql_read32(qdev, ISR2));
  324. printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
  325. ql_read32(qdev, ISR3));
  326. printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
  327. ql_read32(qdev, ISR4));
  328. printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
  329. ql_read32(qdev, REV_ID));
  330. printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
  331. ql_read32(qdev, FRC_ECC_ERR));
  332. printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
  333. ql_read32(qdev, ERR_STS));
  334. printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
  335. ql_read32(qdev, RAM_DBG_ADDR));
  336. printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
  337. ql_read32(qdev, RAM_DBG_DATA));
  338. printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
  339. ql_read32(qdev, ECC_ERR_CNT));
  340. printk(KERN_ERR PFX "SEM = 0x%x.\n",
  341. ql_read32(qdev, SEM));
  342. printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
  343. ql_read32(qdev, GPIO_1));
  344. printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
  345. ql_read32(qdev, GPIO_2));
  346. printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
  347. ql_read32(qdev, GPIO_3));
  348. printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
  349. ql_read32(qdev, XGMAC_ADDR));
  350. printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
  351. ql_read32(qdev, XGMAC_DATA));
  352. printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
  353. ql_read32(qdev, NIC_ETS));
  354. printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
  355. ql_read32(qdev, CNA_ETS));
  356. printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
  357. ql_read32(qdev, FLASH_ADDR));
  358. printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
  359. ql_read32(qdev, FLASH_DATA));
  360. printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
  361. ql_read32(qdev, CQ_STOP));
  362. printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
  363. ql_read32(qdev, PAGE_TBL_RID));
  364. printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
  365. ql_read32(qdev, WQ_PAGE_TBL_LO));
  366. printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
  367. ql_read32(qdev, WQ_PAGE_TBL_HI));
  368. printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
  369. ql_read32(qdev, CQ_PAGE_TBL_LO));
  370. printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
  371. ql_read32(qdev, CQ_PAGE_TBL_HI));
  372. printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
  373. ql_read32(qdev, COS_DFLT_CQ1));
  374. printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
  375. ql_read32(qdev, COS_DFLT_CQ2));
  376. printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
  377. ql_read32(qdev, SPLT_HDR));
  378. printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
  379. ql_read32(qdev, FC_PAUSE_THRES));
  380. printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
  381. ql_read32(qdev, NIC_PAUSE_THRES));
  382. printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
  383. ql_read32(qdev, FC_ETHERTYPE));
  384. printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
  385. ql_read32(qdev, FC_RCV_CFG));
  386. printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
  387. ql_read32(qdev, NIC_RCV_CFG));
  388. printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
  389. ql_read32(qdev, FC_COS_TAGS));
  390. printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
  391. ql_read32(qdev, NIC_COS_TAGS));
  392. printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
  393. ql_read32(qdev, MGMT_RCV_CFG));
  394. printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
  395. ql_read32(qdev, XG_SERDES_ADDR));
  396. printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
  397. ql_read32(qdev, XG_SERDES_DATA));
  398. printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
  399. ql_read32(qdev, PRB_MX_ADDR));
  400. printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
  401. ql_read32(qdev, PRB_MX_DATA));
  402. ql_dump_intr_states(qdev);
  403. ql_dump_xgmac_control_regs(qdev);
  404. ql_dump_ets_regs(qdev);
  405. ql_dump_cam_entries(qdev);
  406. ql_dump_routing_entries(qdev);
  407. }
  408. #endif
  409. #ifdef QL_STAT_DUMP
  410. void ql_dump_stat(struct ql_adapter *qdev)
  411. {
  412. printk(KERN_ERR "%s: Enter.\n", __func__);
  413. printk(KERN_ERR "tx_pkts = %ld\n",
  414. (unsigned long)qdev->nic_stats.tx_pkts);
  415. printk(KERN_ERR "tx_bytes = %ld\n",
  416. (unsigned long)qdev->nic_stats.tx_bytes);
  417. printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
  418. (unsigned long)qdev->nic_stats.tx_mcast_pkts);
  419. printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
  420. (unsigned long)qdev->nic_stats.tx_bcast_pkts);
  421. printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
  422. (unsigned long)qdev->nic_stats.tx_ucast_pkts);
  423. printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
  424. (unsigned long)qdev->nic_stats.tx_ctl_pkts);
  425. printk(KERN_ERR "tx_pause_pkts = %ld.\n",
  426. (unsigned long)qdev->nic_stats.tx_pause_pkts);
  427. printk(KERN_ERR "tx_64_pkt = %ld.\n",
  428. (unsigned long)qdev->nic_stats.tx_64_pkt);
  429. printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
  430. (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
  431. printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
  432. (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
  433. printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
  434. (unsigned long)qdev->nic_stats.tx_256_511_pkt);
  435. printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
  436. (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
  437. printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
  438. (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
  439. printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
  440. (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
  441. printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
  442. (unsigned long)qdev->nic_stats.tx_undersize_pkt);
  443. printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
  444. (unsigned long)qdev->nic_stats.tx_oversize_pkt);
  445. printk(KERN_ERR "rx_bytes = %ld.\n",
  446. (unsigned long)qdev->nic_stats.rx_bytes);
  447. printk(KERN_ERR "rx_bytes_ok = %ld.\n",
  448. (unsigned long)qdev->nic_stats.rx_bytes_ok);
  449. printk(KERN_ERR "rx_pkts = %ld.\n",
  450. (unsigned long)qdev->nic_stats.rx_pkts);
  451. printk(KERN_ERR "rx_pkts_ok = %ld.\n",
  452. (unsigned long)qdev->nic_stats.rx_pkts_ok);
  453. printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
  454. (unsigned long)qdev->nic_stats.rx_bcast_pkts);
  455. printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
  456. (unsigned long)qdev->nic_stats.rx_mcast_pkts);
  457. printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
  458. (unsigned long)qdev->nic_stats.rx_ucast_pkts);
  459. printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
  460. (unsigned long)qdev->nic_stats.rx_undersize_pkts);
  461. printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
  462. (unsigned long)qdev->nic_stats.rx_oversize_pkts);
  463. printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
  464. (unsigned long)qdev->nic_stats.rx_jabber_pkts);
  465. printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
  466. (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
  467. printk(KERN_ERR "rx_drop_events = %ld.\n",
  468. (unsigned long)qdev->nic_stats.rx_drop_events);
  469. printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
  470. (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
  471. printk(KERN_ERR "rx_align_err = %ld.\n",
  472. (unsigned long)qdev->nic_stats.rx_align_err);
  473. printk(KERN_ERR "rx_symbol_err = %ld.\n",
  474. (unsigned long)qdev->nic_stats.rx_symbol_err);
  475. printk(KERN_ERR "rx_mac_err = %ld.\n",
  476. (unsigned long)qdev->nic_stats.rx_mac_err);
  477. printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
  478. (unsigned long)qdev->nic_stats.rx_ctl_pkts);
  479. printk(KERN_ERR "rx_pause_pkts = %ld.\n",
  480. (unsigned long)qdev->nic_stats.rx_pause_pkts);
  481. printk(KERN_ERR "rx_64_pkts = %ld.\n",
  482. (unsigned long)qdev->nic_stats.rx_64_pkts);
  483. printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
  484. (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
  485. printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
  486. (unsigned long)qdev->nic_stats.rx_128_255_pkts);
  487. printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
  488. (unsigned long)qdev->nic_stats.rx_256_511_pkts);
  489. printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
  490. (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
  491. printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
  492. (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
  493. printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
  494. (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
  495. printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
  496. (unsigned long)qdev->nic_stats.rx_len_err_pkts);
  497. };
  498. #endif
  499. #ifdef QL_DEV_DUMP
/* Log the interesting fields of the adapter structure: pointers, DMA
 * addresses, ring/interrupt bookkeeping, and per-vector interrupt
 * context.
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;

	printk(KERN_ERR PFX "qdev->flags = %lx.\n",
	       qdev->flags);
	printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
	       qdev->vlgrp);
	printk(KERN_ERR PFX "qdev->pdev = %p.\n",
	       qdev->pdev);
	printk(KERN_ERR PFX "qdev->ndev = %p.\n",
	       qdev->ndev);
	printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
	       qdev->chip_rev_id);
	printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
	       qdev->reg_base);
	printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
	       qdev->doorbell_area);
	printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
	       qdev->doorbell_area_size);
	printk(KERN_ERR PFX "msg_enable = %x.\n",
	       qdev->msg_enable);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
	       qdev->rx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n",
	       (unsigned long long) qdev->rx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
	       qdev->tx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n",
	       (unsigned long long) qdev->tx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
	       qdev->intr_count);
	/* MSI-X vector table only exists when MSI-X was enabled. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			printk(KERN_ERR PFX
			       "msi_x_entry.[%d]vector = %d.\n", i,
			       qdev->msi_x_entry[i].vector);
			printk(KERN_ERR PFX
			       "msi_x_entry.[%d]entry = %d.\n", i,
			       qdev->msi_x_entry[i].entry);
		}
	/* Per-vector interrupt context, one entry per interrupt. */
	for (i = 0; i < qdev->intr_count; i++) {
		printk(KERN_ERR PFX
		       "intr_context[%d].qdev = %p.\n", i,
		       qdev->intr_context[i].qdev);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr = %d.\n", i,
		       qdev->intr_context[i].intr);
		printk(KERN_ERR PFX
		       "intr_context[%d].hooked = %d.\n", i,
		       qdev->intr_context[i].hooked);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_en_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_en_mask);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_dis_mask);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_read_mask);
	}
	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
	printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
	printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
	       qdev->tx_ring);
	printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
	       qdev->rss_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
	printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
	       qdev->default_rx_queue);
	printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
	       qdev->xg_sem_mask);
	printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
	       qdev->port_link_up);
	printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
	       qdev->port_init);
}
  579. #endif
  580. #ifdef QL_CB_DUMP
  581. void ql_dump_wqicb(struct wqicb *wqicb)
  582. {
  583. printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
  584. printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
  585. printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
  586. printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
  587. le16_to_cpu(wqicb->cq_id_rss));
  588. printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
  589. printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
  590. (unsigned long long) le64_to_cpu(wqicb->addr));
  591. printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
  592. (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
  593. }
/* Log every interesting field of one TX (work queue) ring; NULL-safe. */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	printk(KERN_ERR PFX
	       "===================== Dumping tx_ring %d ===============.\n",
	       tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	/* Shadow register is only dereferenced when mapped (non-NULL). */
	printk(KERN_ERR PFX
	       "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
	       tx_ring->cnsmr_idx_sh_reg,
	       tx_ring->cnsmr_idx_sh_reg
	       ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
	       tx_ring->prod_idx_db_reg);
	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
	       tx_ring->valid_db_reg);
	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
	       atomic_read(&tx_ring->tx_count));
}
/* Log the RSS initialization control block: flags, hash mask, CQ-id
 * table, and the IPv6/IPv4 hash keys.
 */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;

	printk(KERN_ERR PFX
	       "===================== Dumping ricb ===============.\n");
	printk(KERN_ERR PFX "Dumping ricb stuff...\n");
	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
	/* NOTE(review): the first flag tests base_cq, not flags — presumably
	 * RSS_L4K is a bit carried in the base_cq byte; confirm in qlge.h. */
	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}
  650. void ql_dump_cqicb(struct cqicb *cqicb)
  651. {
  652. printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
  653. printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
  654. printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
  655. printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
  656. printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
  657. (unsigned long long) le64_to_cpu(cqicb->addr));
  658. printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
  659. (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
  660. printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
  661. le16_to_cpu(cqicb->pkt_delay));
  662. printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
  663. le16_to_cpu(cqicb->irq_delay));
  664. printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
  665. (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
  666. printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
  667. le16_to_cpu(cqicb->lbq_buf_size));
  668. printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
  669. le16_to_cpu(cqicb->lbq_len));
  670. printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
  671. (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
  672. printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
  673. le16_to_cpu(cqicb->sbq_buf_size));
  674. printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
  675. le16_to_cpu(cqicb->sbq_len));
  676. }
/*
 * Dump every field of an rx (completion) ring to the kernel log at
 * KERN_ERR.  Debug-only helper; a NULL ring is silently ignored.
 * Output order is part of the expected log format — do not reorder.
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	printk(KERN_ERR PFX
	       "===================== Dumping rx_ring %d ===============.\n",
	       rx_ring->cq_id);
	/* A ring is exactly one of: default queue, outbound (tx completion)
	 * queue, or inbound (rx completion) queue.
	 */
	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	/* Completion-queue geometry and DMA addresses. */
	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
	/* Only read the shadow producer index if it is mapped. */
	printk(KERN_ERR PFX
	       "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
	       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
	       rx_ring->cnsmr_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
	       rx_ring->valid_db_reg);
	/* Large-buffer queue (lbq) state. */
	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
	       rx_ring->lbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
	       rx_ring->lbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
	       rx_ring->lbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
	       rx_ring->lbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
	       rx_ring->lbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
	       rx_ring->lbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
	       rx_ring->lbq_buf_size);
	/* Small-buffer queue (sbq) state. */
	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
	       rx_ring->sbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
	       rx_ring->sbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
	       rx_ring->sbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
	       rx_ring->sbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
	       rx_ring->sbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
	       rx_ring->sbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
	       rx_ring->sbq_buf_size);
	/* Misc identification. */
	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
}
  756. void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
  757. {
  758. void *ptr;
  759. printk(KERN_ERR PFX "%s: Enter.\n", __func__);
  760. ptr = kmalloc(size, GFP_ATOMIC);
  761. if (ptr == NULL) {
  762. printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
  763. __func__);
  764. return;
  765. }
  766. if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
  767. printk(KERN_ERR "%s: Failed to upload control block!\n",
  768. __func__);
  769. goto fail_it;
  770. }
  771. switch (bit) {
  772. case CFG_DRQ:
  773. ql_dump_wqicb((struct wqicb *)ptr);
  774. break;
  775. case CFG_DCQ:
  776. ql_dump_cqicb((struct cqicb *)ptr);
  777. break;
  778. case CFG_DR:
  779. ql_dump_ricb((struct ricb *)ptr);
  780. break;
  781. default:
  782. printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
  783. __func__, bit);
  784. break;
  785. }
  786. fail_it:
  787. kfree(ptr);
  788. }
  789. #endif
  790. #ifdef QL_OB_DUMP
  791. void ql_dump_tx_desc(struct tx_buf_desc *tbd)
  792. {
  793. printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
  794. le64_to_cpu((u64) tbd->addr));
  795. printk(KERN_ERR PFX "tbd->len = %d\n",
  796. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  797. printk(KERN_ERR PFX "tbd->flags = %s %s\n",
  798. tbd->len & TX_DESC_C ? "C" : ".",
  799. tbd->len & TX_DESC_E ? "E" : ".");
  800. tbd++;
  801. printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
  802. le64_to_cpu((u64) tbd->addr));
  803. printk(KERN_ERR PFX "tbd->len = %d\n",
  804. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  805. printk(KERN_ERR PFX "tbd->flags = %s %s\n",
  806. tbd->len & TX_DESC_C ? "C" : ".",
  807. tbd->len & TX_DESC_E ? "E" : ".");
  808. tbd++;
  809. printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
  810. le64_to_cpu((u64) tbd->addr));
  811. printk(KERN_ERR PFX "tbd->len = %d\n",
  812. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  813. printk(KERN_ERR PFX "tbd->flags = %s %s\n",
  814. tbd->len & TX_DESC_C ? "C" : ".",
  815. tbd->len & TX_DESC_E ? "E" : ".");
  816. }
  817. void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
  818. {
  819. struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
  820. (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
  821. struct tx_buf_desc *tbd;
  822. u16 frame_len;
  823. printk(KERN_ERR PFX "%s\n", __func__);
  824. printk(KERN_ERR PFX "opcode = %s\n",
  825. (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
  826. printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
  827. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
  828. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
  829. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
  830. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
  831. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
  832. printk(KERN_ERR PFX "flags2 = %s %s %s\n",
  833. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
  834. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
  835. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
  836. printk(KERN_ERR PFX "flags3 = %s %s %s \n",
  837. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
  838. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
  839. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
  840. printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
  841. printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
  842. printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
  843. if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
  844. printk(KERN_ERR PFX "frame_len = %d\n",
  845. le32_to_cpu(ob_mac_tso_iocb->frame_len));
  846. printk(KERN_ERR PFX "mss = %d\n",
  847. le16_to_cpu(ob_mac_tso_iocb->mss));
  848. printk(KERN_ERR PFX "prot_hdr_len = %d\n",
  849. le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
  850. printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
  851. le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
  852. frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
  853. } else {
  854. printk(KERN_ERR PFX "frame_len = %d\n",
  855. le16_to_cpu(ob_mac_iocb->frame_len));
  856. frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
  857. }
  858. tbd = &ob_mac_iocb->tbd[0];
  859. ql_dump_tx_desc(tbd);
  860. }
/*
 * Dump an outbound MAC IOCB response: opcode, the flag bits spread
 * across flags1/flags2 rendered as single letters ('.' when clear),
 * and the transaction id.  Debug-only helper.
 */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode);
	/* Note: six flags come from flags1, the final 'B' from flags2. */
	printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
}
  875. #endif
  876. #ifdef QL_IB_DUMP
/*
 * Dump an inbound MAC IOCB response.  Prints the flag bytes as
 * symbolic names, then the conditional sections (multicast type,
 * error cause, RSS type, vlan id, split-header info) only when the
 * corresponding mask/bit is set.  Debug-only helper; output order is
 * the log format — do not reorder.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
	/* The M field is a multi-bit enum, so compare against the mask
	 * rather than testing individual bits.
	 */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
	/* ERR is likewise a multi-bit enum inside flags2. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
	printk(KERN_ERR PFX "flags3 = %s%s.\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
	printk(KERN_ERR PFX "data_len = %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	printk(KERN_ERR PFX "data_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "rss = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		printk(KERN_ERR PFX "vlan_id = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));
	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
	/* HV set means the header was split out to its own buffer. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* NOTE(review): hdr_len is converted with le32_to_cpu here;
		 * confirm against the struct declaration that the field is
		 * really 32 bits wide (le16_to_cpu would be expected for a
		 * 16-bit header length).
		 */
		printk(KERN_ERR PFX "hdr length = %d.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		printk(KERN_ERR PFX "hdr addr = 0x%llx.\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
  948. #endif
  949. #ifdef QL_ALL_DUMP
/*
 * Dump the adapter's registers, qdev state, and every tx/rx ring with
 * its hardware control block.  Each QL_DUMP_* is a macro that expands
 * to the corresponding ql_dump_* call only when its QL_*_DUMP config
 * option is enabled.
 */
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;

	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		/* NOTE(review): casting the ring to its control block
		 * assumes the wqicb is the first member of struct tx_ring
		 * — confirm against the struct layout in the header.
		 */
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		/* NOTE(review): same first-member assumption for cqicb
		 * within struct rx_ring.
		 */
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
  964. #endif