/* qlge_dbg.c - QLogic qlge NIC driver debug / coredump support. */
  1. #include "qlge.h"
  2. static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
  3. {
  4. int status = 0;
  5. int i;
  6. for (i = 0; i < 8; i++, buf++) {
  7. ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
  8. *buf = ql_read32(qdev, NIC_ETS);
  9. }
  10. for (i = 0; i < 2; i++, buf++) {
  11. ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
  12. *buf = ql_read32(qdev, CNA_ETS);
  13. }
  14. return status;
  15. }
  16. static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
  17. {
  18. int i;
  19. for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
  20. ql_write32(qdev, INTR_EN,
  21. qdev->intr_context[i].intr_read_mask);
  22. *buf = ql_read32(qdev, INTR_EN);
  23. }
  24. }
  25. static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
  26. {
  27. int i, status;
  28. u32 value[3];
  29. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  30. if (status)
  31. return status;
  32. for (i = 0; i < 16; i++) {
  33. status = ql_get_mac_addr_reg(qdev,
  34. MAC_ADDR_TYPE_CAM_MAC, i, value);
  35. if (status) {
  36. QPRINTK(qdev, DRV, ERR,
  37. "Failed read of mac index register.\n");
  38. goto err;
  39. }
  40. *buf++ = value[0]; /* lower MAC address */
  41. *buf++ = value[1]; /* upper MAC address */
  42. *buf++ = value[2]; /* output */
  43. }
  44. for (i = 0; i < 32; i++) {
  45. status = ql_get_mac_addr_reg(qdev,
  46. MAC_ADDR_TYPE_MULTI_MAC, i, value);
  47. if (status) {
  48. QPRINTK(qdev, DRV, ERR,
  49. "Failed read of mac index register.\n");
  50. goto err;
  51. }
  52. *buf++ = value[0]; /* lower Mcast address */
  53. *buf++ = value[1]; /* upper Mcast address */
  54. }
  55. err:
  56. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  57. return status;
  58. }
  59. static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
  60. {
  61. int status;
  62. u32 value, i;
  63. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  64. if (status)
  65. return status;
  66. for (i = 0; i < 16; i++) {
  67. status = ql_get_routing_reg(qdev, i, &value);
  68. if (status) {
  69. QPRINTK(qdev, DRV, ERR,
  70. "Failed read of routing index register.\n");
  71. goto err;
  72. } else {
  73. *buf++ = value;
  74. }
  75. }
  76. err:
  77. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  78. return status;
  79. }
  80. /* Read the MPI Processor shadow registers */
  81. static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
  82. {
  83. u32 i;
  84. int status;
  85. for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
  86. status = ql_write_mpi_reg(qdev, RISC_124,
  87. (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
  88. if (status)
  89. goto end;
  90. status = ql_read_mpi_reg(qdev, RISC_127, buf);
  91. if (status)
  92. goto end;
  93. }
  94. end:
  95. return status;
  96. }
  97. /* Read the MPI Processor core registers */
  98. static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
  99. u32 offset, u32 count)
  100. {
  101. int i, status = 0;
  102. for (i = 0; i < count; i++, buf++) {
  103. status = ql_read_mpi_reg(qdev, offset + i, buf);
  104. if (status)
  105. return status;
  106. }
  107. return status;
  108. }
  109. /* Read out the routing index registers */
  110. static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
  111. {
  112. int status;
  113. u32 type, index, index_max;
  114. u32 result_index;
  115. u32 result_data;
  116. u32 val;
  117. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  118. if (status)
  119. return status;
  120. for (type = 0; type < 4; type++) {
  121. if (type < 2)
  122. index_max = 8;
  123. else
  124. index_max = 16;
  125. for (index = 0; index < index_max; index++) {
  126. val = RT_IDX_RS
  127. | (type << RT_IDX_TYPE_SHIFT)
  128. | (index << RT_IDX_IDX_SHIFT);
  129. ql_write32(qdev, RT_IDX, val);
  130. result_index = 0;
  131. while ((result_index & RT_IDX_MR) == 0)
  132. result_index = ql_read32(qdev, RT_IDX);
  133. result_data = ql_read32(qdev, RT_DATA);
  134. *buf = type;
  135. buf++;
  136. *buf = index;
  137. buf++;
  138. *buf = result_index;
  139. buf++;
  140. *buf = result_data;
  141. buf++;
  142. }
  143. }
  144. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  145. return status;
  146. }
/* Read out the MAC protocol registers.
 * For every address type, walks each entry index and word offset,
 * emitting two words per read: the raw MAC_ADDR_IDX readback and the
 * MAC_ADDR_DATA value.  No locking is done here; the caller is expected
 * to hold whatever synchronization the dump path requires.
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		/* Pick index/word-count limits for this address type. */
		switch (type) {

		case 0: /* CAM */
			/* NOTE(review): this OR persists for all later
			 * types since initial_val is never reset inside the
			 * loop — confirm MAC_ADDR_ADR is intended for
			 * types 1-9 as well.
			 */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			/* NOTE(review): max_index uses a WCOUNT constant
			 * where an ENTRIES constant would be expected —
			 * verify against the hardware spec.
			 */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				/* Compose the select word: read strobe,
				 * type, entry index, word offset.
				 */
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* Busy-wait for read-complete.
				 * NOTE(review): unbounded poll — hangs if
				 * MAC_ADDR_MR is never set.
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
						MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				/* Two output words per read. */
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
  225. static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
  226. {
  227. u32 func_num, reg, reg_val;
  228. int status;
  229. for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
  230. reg = MPI_NIC_REG_BLOCK
  231. | (func_num << MPI_NIC_FUNCTION_SHIFT)
  232. | (SEM / 4);
  233. status = ql_read_mpi_reg(qdev, reg, &reg_val);
  234. *buf = reg_val;
  235. /* if the read failed then dead fill the element. */
  236. if (!status)
  237. *buf = 0xdeadbeef;
  238. buf++;
  239. }
  240. }
  241. /* Create a coredump segment header */
  242. static void ql_build_coredump_seg_header(
  243. struct mpi_coredump_segment_header *seg_hdr,
  244. u32 seg_number, u32 seg_size, u8 *desc)
  245. {
  246. memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
  247. seg_hdr->cookie = MPI_COREDUMP_COOKIE;
  248. seg_hdr->segNum = seg_number;
  249. seg_hdr->segSize = seg_size;
  250. memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
  251. }
  252. /*
  253. * This function should be called when a coredump / probedump
  254. * is to be extracted from the HBA. It is assumed there is a
  255. * qdev structure that contains the base address of the register
  256. * space for this function as well as a coredump structure that
  257. * will contain the dump.
  258. */
int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	int i;

	/* Caller must supply the destination buffer. */
	if (!mpi_coredump) {
		QPRINTK(qdev, DRV, ERR,
			"No memory available.\n");
		return -ENOMEM;
	}

	/* Try to get the spinlock, but dont worry if
	 * it isn't available. If the firmware died it
	 * might be holding the sem.
	 */
	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);

	/* Freeze the MPI RISC so register contents stay stable during
	 * the dump.
	 */
	status = ql_pause_mpi_risc(qdev);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed RISC pause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Insert the global header */
	memset(&(mpi_coredump->mpi_global_header), 0,
		sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_mpi_coredump);
	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));

	/* Get generic NIC reg dump */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
		NIC1_CONTROL_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header) +
		sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
	/* NOTE(review): only the NIC1 segment header is built, yet odd
	 * functions store into nic2_regs below — confirm a matching NIC2
	 * header isn't required.
	 */
	if (qdev->func & 1) {
		/* Odd means our function is NIC 2 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				ql_read32(qdev, i * sizeof(u32));
	} else {
		/* Even means our function is NIC 1 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				ql_read32(qdev, i * sizeof(u32));
	}

	/* One segment covers both the core and the shadow registers. */
	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
		CORE_SEG_NUM,
		sizeof(mpi_coredump->core_regs_seg_hdr) +
		sizeof(mpi_coredump->mpi_core_regs) +
		sizeof(mpi_coredump->mpi_core_sh_regs),
		"Core Registers");

	/* Get the MPI Core Registers */
	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
		MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
	if (status)
		goto err;
	/* Get the 16 MPI shadow registers */
	status = ql_get_mpi_shadow_regs(qdev,
		&mpi_coredump->mpi_core_sh_regs[0]);
	if (status)
		goto err;

	/* Get the Test Logic Registers */
	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
		TEST_LOGIC_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->test_logic_regs),
		"Test Logic Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
		TEST_REGS_ADDR, TEST_REGS_CNT);
	if (status)
		goto err;

	/* Get the RMII Registers */
	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
		RMII_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->rmii_regs),
		"RMII Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
		RMII_REGS_ADDR, RMII_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC1 Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
		FCMAC1_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->fcmac1_regs),
		"FCMAC1 Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
		FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC2 Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
		FCMAC2_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->fcmac2_regs),
		"FCMAC2 Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
		FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC1 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
		FC1_MBOX_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->fc1_mbx_regs),
		"FC1 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
		FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the IDE Registers */
	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
		IDE_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->ide_regs),
		"IDE Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
		IDE_REGS_ADDR, IDE_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC1 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
		NIC1_MBOX_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->nic1_mbx_regs),
		"NIC1 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
		NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the SMBus Registers */
	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
		SMBUS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->smbus_regs),
		"SMBus Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
		SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC2 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
		FC2_MBOX_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->fc2_mbx_regs),
		"FC2 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
		FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC2 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
		NIC2_MBOX_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->nic2_mbx_regs),
		"NIC2 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
		NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the I2C Registers */
	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
		I2C_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->i2c_regs),
		"I2C Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
		I2C_REGS_ADDR, I2C_REGS_CNT);
	if (status)
		goto err;

	/* Get the MEMC Registers */
	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
		MEMC_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->memc_regs),
		"MEMC Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
		MEMC_REGS_ADDR, MEMC_REGS_CNT);
	if (status)
		goto err;

	/* Get the PBus Registers */
	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
		PBUS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->pbus_regs),
		"PBUS Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
		PBUS_REGS_ADDR, PBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the MDE Registers */
	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
		MDE_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->mde_regs),
		"MDE Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
		MDE_REGS_ADDR, MDE_REGS_CNT);
	if (status)
		goto err;

	/* Miscellaneous driver-side info (ring/vector counts, function). */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
		MISC_NIC_INFO_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->misc_nic_info),
		"MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 31 */
	/* Get indexed register values. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
		INTR_STATES_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->intr_states),
		"INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
		CAM_ENTRIES_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->cam_entries),
		"CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
		ROUTING_WORDS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->nic_routing_words),
		"Routing Words");
	status = ql_get_routing_entries(qdev,
		&mpi_coredump->nic_routing_words[0]);
	if (status)
		goto err;

	/* Segment 34 (Rev C. step 23) */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
		ETS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->ets),
		"ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
		ROUTING_INDEX_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->routing_regs),
		"Routing Regs");
	status = ql_get_routing_index_registers(qdev,
		&mpi_coredump->routing_regs[0]);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
		MAC_PROTOCOL_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->mac_prot_regs),
		"MAC Prot Regs");
	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);

	/* Get the semaphore registers for all 5 functions */
	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
		SEM_REGS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header) +
		sizeof(mpi_coredump->sem_regs), "Sem Registers");
	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);

	/* Prevent the mpi restarting while we dump the memory.*/
	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);

	/* clear the pause */
	status = ql_unpause_mpi_risc(qdev);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed RISC unpause. Status = 0x%.08x\n", status);
		goto err;
	}
err:
	/* Success falls through here too; the semaphore is released on
	 * every path.
	 */
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
	return status;
}
/* Build a lightweight register dump (no MPI pause, subset of the full
 * coredump) into *mpi_coredump.
 * NOTE(review): errors from the indexed-register reads cause a silent
 * early return with later segments left unfilled — the void return gives
 * the caller no indication.
 */
void ql_gen_reg_dump(struct ql_adapter *qdev,
			struct ql_reg_dump *mpi_coredump)
{
	int i, status;

	/* Global header identifying the dump image. */
	memset(&(mpi_coredump->mpi_global_header), 0,
		sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_reg_dump);
	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));

	/* segment 16 */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
		MISC_NIC_INFO_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->misc_nic_info),
		"MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 16, Rev C. Step 18 */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
		NIC1_CONTROL_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->nic_regs),
		"NIC Registers");
	/* Get generic reg dump */
	for (i = 0; i < 64; i++)
		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

	/* Segment 31 */
	/* Get indexed register values. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
		INTR_STATES_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->intr_states),
		"INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
		CAM_ENTRIES_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->cam_entries),
		"CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		return;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
		ROUTING_WORDS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->nic_routing_words),
		"Routing Words");
	status = ql_get_routing_entries(qdev,
		&mpi_coredump->nic_routing_words[0]);
	if (status)
		return;

	/* Segment 34 (Rev C. step 23) */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
		ETS_SEG_NUM,
		sizeof(struct mpi_coredump_segment_header)
		+ sizeof(mpi_coredump->ets),
		"ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		return;
}
  605. /* Coredump to messages log file using separate worker thread */
  606. void ql_mpi_core_to_log(struct work_struct *work)
  607. {
  608. struct ql_adapter *qdev =
  609. container_of(work, struct ql_adapter, mpi_core_to_log.work);
  610. u32 *tmp, count;
  611. int i;
  612. count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
  613. tmp = (u32 *)qdev->mpi_coredump;
  614. QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
  615. for (i = 0; i < count; i += 8) {
  616. printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
  617. "%.08x %.08x %.08x \n", i,
  618. tmp[i + 0],
  619. tmp[i + 1],
  620. tmp[i + 2],
  621. tmp[i + 3],
  622. tmp[i + 4],
  623. tmp[i + 5],
  624. tmp[i + 6],
  625. tmp[i + 7]);
  626. msleep(5);
  627. }
  628. }
  629. #ifdef QL_REG_DUMP
  630. static void ql_dump_intr_states(struct ql_adapter *qdev)
  631. {
  632. int i;
  633. u32 value;
  634. for (i = 0; i < qdev->intr_count; i++) {
  635. ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
  636. value = ql_read32(qdev, INTR_EN);
  637. printk(KERN_ERR PFX
  638. "%s: Interrupt %d is %s.\n",
  639. qdev->ndev->name, i,
  640. (value & INTR_EN_EN ? "enabled" : "disabled"));
  641. }
  642. }
  643. void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
  644. {
  645. u32 data;
  646. if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
  647. printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
  648. return;
  649. }
  650. ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
  651. printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
  652. data);
  653. ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
  654. printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
  655. data);
  656. ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
  657. printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
  658. data);
  659. ql_read_xgmac_reg(qdev, TX_CFG, &data);
  660. printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
  661. ql_read_xgmac_reg(qdev, RX_CFG, &data);
  662. printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
  663. ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
  664. printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
  665. data);
  666. ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
  667. printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
  668. data);
  669. ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
  670. printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
  671. data);
  672. ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
  673. printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
  674. qdev->ndev->name, data);
  675. ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
  676. printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
  677. qdev->ndev->name, data);
  678. ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
  679. printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
  680. data);
  681. ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
  682. printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
  683. data);
  684. ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
  685. printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
  686. data);
  687. ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
  688. printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
  689. qdev->ndev->name, data);
  690. ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
  691. printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
  692. data);
  693. ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
  694. printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
  695. qdev->ndev->name, data);
  696. ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
  697. printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
  698. data);
  699. ql_sem_unlock(qdev, qdev->xg_sem_mask);
  700. }
/* Placeholder: ETS register dumping is not implemented in this debug
 * build (see ql_get_ets_regs() for the coredump-path equivalent).
 */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
  704. static void ql_dump_cam_entries(struct ql_adapter *qdev)
  705. {
  706. int i;
  707. u32 value[3];
  708. i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  709. if (i)
  710. return;
  711. for (i = 0; i < 4; i++) {
  712. if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
  713. printk(KERN_ERR PFX
  714. "%s: Failed read of mac index register.\n",
  715. __func__);
  716. return;
  717. } else {
  718. if (value[0])
  719. printk(KERN_ERR PFX
  720. "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
  721. qdev->ndev->name, i, value[1], value[0],
  722. value[2]);
  723. }
  724. }
  725. for (i = 0; i < 32; i++) {
  726. if (ql_get_mac_addr_reg
  727. (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
  728. printk(KERN_ERR PFX
  729. "%s: Failed read of mac index register.\n",
  730. __func__);
  731. return;
  732. } else {
  733. if (value[0])
  734. printk(KERN_ERR PFX
  735. "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
  736. qdev->ndev->name, i, value[1], value[0]);
  737. }
  738. }
  739. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  740. }
  741. void ql_dump_routing_entries(struct ql_adapter *qdev)
  742. {
  743. int i;
  744. u32 value;
  745. i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  746. if (i)
  747. return;
  748. for (i = 0; i < 16; i++) {
  749. value = 0;
  750. if (ql_get_routing_reg(qdev, i, &value)) {
  751. printk(KERN_ERR PFX
  752. "%s: Failed read of routing index register.\n",
  753. __func__);
  754. return;
  755. } else {
  756. if (value)
  757. printk(KERN_ERR PFX
  758. "%s: Routing Mask %d = 0x%.08x.\n",
  759. qdev->ndev->name, i, value);
  760. }
  761. }
  762. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  763. }
  764. void ql_dump_regs(struct ql_adapter *qdev)
  765. {
  766. printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
  767. printk(KERN_ERR PFX "SYS = 0x%x.\n",
  768. ql_read32(qdev, SYS));
  769. printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
  770. ql_read32(qdev, RST_FO));
  771. printk(KERN_ERR PFX "FSC = 0x%x.\n",
  772. ql_read32(qdev, FSC));
  773. printk(KERN_ERR PFX "CSR = 0x%x.\n",
  774. ql_read32(qdev, CSR));
  775. printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
  776. ql_read32(qdev, ICB_RID));
  777. printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
  778. ql_read32(qdev, ICB_L));
  779. printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
  780. ql_read32(qdev, ICB_H));
  781. printk(KERN_ERR PFX "CFG = 0x%x.\n",
  782. ql_read32(qdev, CFG));
  783. printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
  784. ql_read32(qdev, BIOS_ADDR));
  785. printk(KERN_ERR PFX "STS = 0x%x.\n",
  786. ql_read32(qdev, STS));
  787. printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
  788. ql_read32(qdev, INTR_EN));
  789. printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
  790. ql_read32(qdev, INTR_MASK));
  791. printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
  792. ql_read32(qdev, ISR1));
  793. printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
  794. ql_read32(qdev, ISR2));
  795. printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
  796. ql_read32(qdev, ISR3));
  797. printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
  798. ql_read32(qdev, ISR4));
  799. printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
  800. ql_read32(qdev, REV_ID));
  801. printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
  802. ql_read32(qdev, FRC_ECC_ERR));
  803. printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
  804. ql_read32(qdev, ERR_STS));
  805. printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
  806. ql_read32(qdev, RAM_DBG_ADDR));
  807. printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
  808. ql_read32(qdev, RAM_DBG_DATA));
  809. printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
  810. ql_read32(qdev, ECC_ERR_CNT));
  811. printk(KERN_ERR PFX "SEM = 0x%x.\n",
  812. ql_read32(qdev, SEM));
  813. printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
  814. ql_read32(qdev, GPIO_1));
  815. printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
  816. ql_read32(qdev, GPIO_2));
  817. printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
  818. ql_read32(qdev, GPIO_3));
  819. printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
  820. ql_read32(qdev, XGMAC_ADDR));
  821. printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
  822. ql_read32(qdev, XGMAC_DATA));
  823. printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
  824. ql_read32(qdev, NIC_ETS));
  825. printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
  826. ql_read32(qdev, CNA_ETS));
  827. printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
  828. ql_read32(qdev, FLASH_ADDR));
  829. printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
  830. ql_read32(qdev, FLASH_DATA));
  831. printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
  832. ql_read32(qdev, CQ_STOP));
  833. printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
  834. ql_read32(qdev, PAGE_TBL_RID));
  835. printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
  836. ql_read32(qdev, WQ_PAGE_TBL_LO));
  837. printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
  838. ql_read32(qdev, WQ_PAGE_TBL_HI));
  839. printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
  840. ql_read32(qdev, CQ_PAGE_TBL_LO));
  841. printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
  842. ql_read32(qdev, CQ_PAGE_TBL_HI));
  843. printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
  844. ql_read32(qdev, COS_DFLT_CQ1));
  845. printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
  846. ql_read32(qdev, COS_DFLT_CQ2));
  847. printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
  848. ql_read32(qdev, SPLT_HDR));
  849. printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
  850. ql_read32(qdev, FC_PAUSE_THRES));
  851. printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
  852. ql_read32(qdev, NIC_PAUSE_THRES));
  853. printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
  854. ql_read32(qdev, FC_ETHERTYPE));
  855. printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
  856. ql_read32(qdev, FC_RCV_CFG));
  857. printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
  858. ql_read32(qdev, NIC_RCV_CFG));
  859. printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
  860. ql_read32(qdev, FC_COS_TAGS));
  861. printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
  862. ql_read32(qdev, NIC_COS_TAGS));
  863. printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
  864. ql_read32(qdev, MGMT_RCV_CFG));
  865. printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
  866. ql_read32(qdev, XG_SERDES_ADDR));
  867. printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
  868. ql_read32(qdev, XG_SERDES_DATA));
  869. printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
  870. ql_read32(qdev, PRB_MX_ADDR));
  871. printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
  872. ql_read32(qdev, PRB_MX_DATA));
  873. ql_dump_intr_states(qdev);
  874. ql_dump_xgmac_control_regs(qdev);
  875. ql_dump_ets_regs(qdev);
  876. ql_dump_cam_entries(qdev);
  877. ql_dump_routing_entries(qdev);
  878. }
  879. #endif
  880. #ifdef QL_STAT_DUMP
  881. void ql_dump_stat(struct ql_adapter *qdev)
  882. {
  883. printk(KERN_ERR "%s: Enter.\n", __func__);
  884. printk(KERN_ERR "tx_pkts = %ld\n",
  885. (unsigned long)qdev->nic_stats.tx_pkts);
  886. printk(KERN_ERR "tx_bytes = %ld\n",
  887. (unsigned long)qdev->nic_stats.tx_bytes);
  888. printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
  889. (unsigned long)qdev->nic_stats.tx_mcast_pkts);
  890. printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
  891. (unsigned long)qdev->nic_stats.tx_bcast_pkts);
  892. printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
  893. (unsigned long)qdev->nic_stats.tx_ucast_pkts);
  894. printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
  895. (unsigned long)qdev->nic_stats.tx_ctl_pkts);
  896. printk(KERN_ERR "tx_pause_pkts = %ld.\n",
  897. (unsigned long)qdev->nic_stats.tx_pause_pkts);
  898. printk(KERN_ERR "tx_64_pkt = %ld.\n",
  899. (unsigned long)qdev->nic_stats.tx_64_pkt);
  900. printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
  901. (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
  902. printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
  903. (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
  904. printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
  905. (unsigned long)qdev->nic_stats.tx_256_511_pkt);
  906. printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
  907. (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
  908. printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
  909. (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
  910. printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
  911. (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
  912. printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
  913. (unsigned long)qdev->nic_stats.tx_undersize_pkt);
  914. printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
  915. (unsigned long)qdev->nic_stats.tx_oversize_pkt);
  916. printk(KERN_ERR "rx_bytes = %ld.\n",
  917. (unsigned long)qdev->nic_stats.rx_bytes);
  918. printk(KERN_ERR "rx_bytes_ok = %ld.\n",
  919. (unsigned long)qdev->nic_stats.rx_bytes_ok);
  920. printk(KERN_ERR "rx_pkts = %ld.\n",
  921. (unsigned long)qdev->nic_stats.rx_pkts);
  922. printk(KERN_ERR "rx_pkts_ok = %ld.\n",
  923. (unsigned long)qdev->nic_stats.rx_pkts_ok);
  924. printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
  925. (unsigned long)qdev->nic_stats.rx_bcast_pkts);
  926. printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
  927. (unsigned long)qdev->nic_stats.rx_mcast_pkts);
  928. printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
  929. (unsigned long)qdev->nic_stats.rx_ucast_pkts);
  930. printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
  931. (unsigned long)qdev->nic_stats.rx_undersize_pkts);
  932. printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
  933. (unsigned long)qdev->nic_stats.rx_oversize_pkts);
  934. printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
  935. (unsigned long)qdev->nic_stats.rx_jabber_pkts);
  936. printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
  937. (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
  938. printk(KERN_ERR "rx_drop_events = %ld.\n",
  939. (unsigned long)qdev->nic_stats.rx_drop_events);
  940. printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
  941. (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
  942. printk(KERN_ERR "rx_align_err = %ld.\n",
  943. (unsigned long)qdev->nic_stats.rx_align_err);
  944. printk(KERN_ERR "rx_symbol_err = %ld.\n",
  945. (unsigned long)qdev->nic_stats.rx_symbol_err);
  946. printk(KERN_ERR "rx_mac_err = %ld.\n",
  947. (unsigned long)qdev->nic_stats.rx_mac_err);
  948. printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
  949. (unsigned long)qdev->nic_stats.rx_ctl_pkts);
  950. printk(KERN_ERR "rx_pause_pkts = %ld.\n",
  951. (unsigned long)qdev->nic_stats.rx_pause_pkts);
  952. printk(KERN_ERR "rx_64_pkts = %ld.\n",
  953. (unsigned long)qdev->nic_stats.rx_64_pkts);
  954. printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
  955. (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
  956. printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
  957. (unsigned long)qdev->nic_stats.rx_128_255_pkts);
  958. printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
  959. (unsigned long)qdev->nic_stats.rx_256_511_pkts);
  960. printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
  961. (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
  962. printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
  963. (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
  964. printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
  965. (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
  966. printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
  967. (unsigned long)qdev->nic_stats.rx_len_err_pkts);
  968. };
  969. #endif
  970. #ifdef QL_DEV_DUMP
  971. void ql_dump_qdev(struct ql_adapter *qdev)
  972. {
  973. int i;
  974. printk(KERN_ERR PFX "qdev->flags = %lx.\n",
  975. qdev->flags);
  976. printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
  977. qdev->vlgrp);
  978. printk(KERN_ERR PFX "qdev->pdev = %p.\n",
  979. qdev->pdev);
  980. printk(KERN_ERR PFX "qdev->ndev = %p.\n",
  981. qdev->ndev);
  982. printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
  983. qdev->chip_rev_id);
  984. printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
  985. qdev->reg_base);
  986. printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
  987. qdev->doorbell_area);
  988. printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
  989. qdev->doorbell_area_size);
  990. printk(KERN_ERR PFX "msg_enable = %x.\n",
  991. qdev->msg_enable);
  992. printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
  993. qdev->rx_ring_shadow_reg_area);
  994. printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n",
  995. (unsigned long long) qdev->rx_ring_shadow_reg_dma);
  996. printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
  997. qdev->tx_ring_shadow_reg_area);
  998. printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n",
  999. (unsigned long long) qdev->tx_ring_shadow_reg_dma);
  1000. printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
  1001. qdev->intr_count);
  1002. if (qdev->msi_x_entry)
  1003. for (i = 0; i < qdev->intr_count; i++) {
  1004. printk(KERN_ERR PFX
  1005. "msi_x_entry.[%d]vector = %d.\n", i,
  1006. qdev->msi_x_entry[i].vector);
  1007. printk(KERN_ERR PFX
  1008. "msi_x_entry.[%d]entry = %d.\n", i,
  1009. qdev->msi_x_entry[i].entry);
  1010. }
  1011. for (i = 0; i < qdev->intr_count; i++) {
  1012. printk(KERN_ERR PFX
  1013. "intr_context[%d].qdev = %p.\n", i,
  1014. qdev->intr_context[i].qdev);
  1015. printk(KERN_ERR PFX
  1016. "intr_context[%d].intr = %d.\n", i,
  1017. qdev->intr_context[i].intr);
  1018. printk(KERN_ERR PFX
  1019. "intr_context[%d].hooked = %d.\n", i,
  1020. qdev->intr_context[i].hooked);
  1021. printk(KERN_ERR PFX
  1022. "intr_context[%d].intr_en_mask = 0x%08x.\n", i,
  1023. qdev->intr_context[i].intr_en_mask);
  1024. printk(KERN_ERR PFX
  1025. "intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
  1026. qdev->intr_context[i].intr_dis_mask);
  1027. printk(KERN_ERR PFX
  1028. "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
  1029. qdev->intr_context[i].intr_read_mask);
  1030. }
  1031. printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
  1032. printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
  1033. printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
  1034. printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
  1035. printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
  1036. printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
  1037. qdev->tx_ring);
  1038. printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
  1039. qdev->rss_ring_count);
  1040. printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
  1041. printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
  1042. qdev->default_rx_queue);
  1043. printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
  1044. qdev->xg_sem_mask);
  1045. printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
  1046. qdev->port_link_up);
  1047. printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
  1048. qdev->port_init);
  1049. }
  1050. #endif
  1051. #ifdef QL_CB_DUMP
/*
 * Dump a work queue initialization control block (WQICB).
 * Multi-byte fields are little-endian in the block; they are
 * converted to host order before printing.
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
	       le16_to_cpu(wqicb->cq_id_rss));
	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
	       (unsigned long long) le64_to_cpu(wqicb->addr));
	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}
  1065. void ql_dump_tx_ring(struct tx_ring *tx_ring)
  1066. {
  1067. if (tx_ring == NULL)
  1068. return;
  1069. printk(KERN_ERR PFX
  1070. "===================== Dumping tx_ring %d ===============.\n",
  1071. tx_ring->wq_id);
  1072. printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
  1073. printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
  1074. (unsigned long long) tx_ring->wq_base_dma);
  1075. printk(KERN_ERR PFX
  1076. "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
  1077. tx_ring->cnsmr_idx_sh_reg,
  1078. tx_ring->cnsmr_idx_sh_reg
  1079. ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
  1080. printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
  1081. printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
  1082. printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
  1083. tx_ring->prod_idx_db_reg);
  1084. printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
  1085. tx_ring->valid_db_reg);
  1086. printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
  1087. printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
  1088. printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
  1089. printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
  1090. printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
  1091. atomic_read(&tx_ring->tx_count));
  1092. }
  1093. void ql_dump_ricb(struct ricb *ricb)
  1094. {
  1095. int i;
  1096. printk(KERN_ERR PFX
  1097. "===================== Dumping ricb ===============.\n");
  1098. printk(KERN_ERR PFX "Dumping ricb stuff...\n");
  1099. printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
  1100. printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
  1101. ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
  1102. ricb->flags & RSS_L6K ? "RSS_L6K " : "",
  1103. ricb->flags & RSS_LI ? "RSS_LI " : "",
  1104. ricb->flags & RSS_LB ? "RSS_LB " : "",
  1105. ricb->flags & RSS_LM ? "RSS_LM " : "",
  1106. ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
  1107. ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
  1108. ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
  1109. ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
  1110. printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
  1111. for (i = 0; i < 16; i++)
  1112. printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
  1113. le32_to_cpu(ricb->hash_cq_id[i]));
  1114. for (i = 0; i < 10; i++)
  1115. printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
  1116. le32_to_cpu(ricb->ipv6_hash_key[i]));
  1117. for (i = 0; i < 4; i++)
  1118. printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
  1119. le32_to_cpu(ricb->ipv4_hash_key[i]));
  1120. }
/*
 * Dump a completion queue initialization control block (CQICB),
 * including the large/small buffer queue (lbq/sbq) parameters.
 * Multi-byte fields are little-endian and converted before printing.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
	       le16_to_cpu(cqicb->pkt_delay));
	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
	       le16_to_cpu(cqicb->irq_delay));
	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
	       le16_to_cpu(cqicb->lbq_len));
	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
	       le16_to_cpu(cqicb->sbq_len));
}
  1148. void ql_dump_rx_ring(struct rx_ring *rx_ring)
  1149. {
  1150. if (rx_ring == NULL)
  1151. return;
  1152. printk(KERN_ERR PFX
  1153. "===================== Dumping rx_ring %d ===============.\n",
  1154. rx_ring->cq_id);
  1155. printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
  1156. rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
  1157. rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
  1158. rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
  1159. printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
  1160. printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
  1161. printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
  1162. (unsigned long long) rx_ring->cq_base_dma);
  1163. printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
  1164. printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
  1165. printk(KERN_ERR PFX
  1166. "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
  1167. rx_ring->prod_idx_sh_reg,
  1168. rx_ring->prod_idx_sh_reg
  1169. ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
  1170. printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
  1171. (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
  1172. printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
  1173. rx_ring->cnsmr_idx_db_reg);
  1174. printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
  1175. printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
  1176. printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
  1177. rx_ring->valid_db_reg);
  1178. printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
  1179. printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
  1180. (unsigned long long) rx_ring->lbq_base_dma);
  1181. printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
  1182. rx_ring->lbq_base_indirect);
  1183. printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
  1184. (unsigned long long) rx_ring->lbq_base_indirect_dma);
  1185. printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
  1186. printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
  1187. printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
  1188. printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
  1189. rx_ring->lbq_prod_idx_db_reg);
  1190. printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
  1191. rx_ring->lbq_prod_idx);
  1192. printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
  1193. rx_ring->lbq_curr_idx);
  1194. printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
  1195. rx_ring->lbq_clean_idx);
  1196. printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
  1197. rx_ring->lbq_free_cnt);
  1198. printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
  1199. rx_ring->lbq_buf_size);
  1200. printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
  1201. printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
  1202. (unsigned long long) rx_ring->sbq_base_dma);
  1203. printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
  1204. rx_ring->sbq_base_indirect);
  1205. printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
  1206. (unsigned long long) rx_ring->sbq_base_indirect_dma);
  1207. printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
  1208. printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
  1209. printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
  1210. printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
  1211. rx_ring->sbq_prod_idx_db_reg);
  1212. printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
  1213. rx_ring->sbq_prod_idx);
  1214. printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
  1215. rx_ring->sbq_curr_idx);
  1216. printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
  1217. rx_ring->sbq_clean_idx);
  1218. printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
  1219. rx_ring->sbq_free_cnt);
  1220. printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
  1221. rx_ring->sbq_buf_size);
  1222. printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
  1223. printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
  1224. printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
  1225. printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
  1226. }
  1227. void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
  1228. {
  1229. void *ptr;
  1230. printk(KERN_ERR PFX "%s: Enter.\n", __func__);
  1231. ptr = kmalloc(size, GFP_ATOMIC);
  1232. if (ptr == NULL) {
  1233. printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
  1234. __func__);
  1235. return;
  1236. }
  1237. if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
  1238. printk(KERN_ERR "%s: Failed to upload control block!\n",
  1239. __func__);
  1240. goto fail_it;
  1241. }
  1242. switch (bit) {
  1243. case CFG_DRQ:
  1244. ql_dump_wqicb((struct wqicb *)ptr);
  1245. break;
  1246. case CFG_DCQ:
  1247. ql_dump_cqicb((struct cqicb *)ptr);
  1248. break;
  1249. case CFG_DR:
  1250. ql_dump_ricb((struct ricb *)ptr);
  1251. break;
  1252. default:
  1253. printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
  1254. __func__, bit);
  1255. break;
  1256. }
  1257. fail_it:
  1258. kfree(ptr);
  1259. }
  1260. #endif
  1261. #ifdef QL_OB_DUMP
  1262. void ql_dump_tx_desc(struct tx_buf_desc *tbd)
  1263. {
  1264. printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
  1265. le64_to_cpu((u64) tbd->addr));
  1266. printk(KERN_ERR PFX "tbd->len = %d\n",
  1267. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  1268. printk(KERN_ERR PFX "tbd->flags = %s %s\n",
  1269. tbd->len & TX_DESC_C ? "C" : ".",
  1270. tbd->len & TX_DESC_E ? "E" : ".");
  1271. tbd++;
  1272. printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
  1273. le64_to_cpu((u64) tbd->addr));
  1274. printk(KERN_ERR PFX "tbd->len = %d\n",
  1275. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  1276. printk(KERN_ERR PFX "tbd->flags = %s %s\n",
  1277. tbd->len & TX_DESC_C ? "C" : ".",
  1278. tbd->len & TX_DESC_E ? "E" : ".");
  1279. tbd++;
  1280. printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
  1281. le64_to_cpu((u64) tbd->addr));
  1282. printk(KERN_ERR PFX "tbd->len = %d\n",
  1283. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  1284. printk(KERN_ERR PFX "tbd->flags = %s %s\n",
  1285. tbd->len & TX_DESC_C ? "C" : ".",
  1286. tbd->len & TX_DESC_E ? "E" : ".");
  1287. }
  1288. void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
  1289. {
  1290. struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
  1291. (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
  1292. struct tx_buf_desc *tbd;
  1293. u16 frame_len;
  1294. printk(KERN_ERR PFX "%s\n", __func__);
  1295. printk(KERN_ERR PFX "opcode = %s\n",
  1296. (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
  1297. printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
  1298. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
  1299. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
  1300. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
  1301. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
  1302. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
  1303. printk(KERN_ERR PFX "flags2 = %s %s %s\n",
  1304. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
  1305. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
  1306. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
  1307. printk(KERN_ERR PFX "flags3 = %s %s %s \n",
  1308. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
  1309. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
  1310. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
  1311. printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
  1312. printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
  1313. printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
  1314. if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
  1315. printk(KERN_ERR PFX "frame_len = %d\n",
  1316. le32_to_cpu(ob_mac_tso_iocb->frame_len));
  1317. printk(KERN_ERR PFX "mss = %d\n",
  1318. le16_to_cpu(ob_mac_tso_iocb->mss));
  1319. printk(KERN_ERR PFX "prot_hdr_len = %d\n",
  1320. le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
  1321. printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
  1322. le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
  1323. frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
  1324. } else {
  1325. printk(KERN_ERR PFX "frame_len = %d\n",
  1326. le16_to_cpu(ob_mac_iocb->frame_len));
  1327. frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
  1328. }
  1329. tbd = &ob_mac_iocb->tbd[0];
  1330. ql_dump_tx_desc(tbd);
  1331. }
/*
 * Dump an outbound MAC IOCB completion.  Each flag bit prints its
 * letter when set, or "." when clear; note the last flag (B) lives
 * in flags2 while the rest are in flags1.
 */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	/* NOTE(review): tid is dumped raw; confirm whether it is __le32. */
	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
}
  1346. #endif
  1347. #ifdef QL_IB_DUMP
/*
 * Dump an inbound MAC IOCB completion: flag bytes, error/multicast/RSS
 * classification, data and (optionally) split-header buffer info.
 * Multi-bit subfields are decoded by comparing the masked value
 * against each defined constant, so at most one name prints per group.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
	/* Multicast type is a multi-bit subfield of flags1. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
	/* Receive error type is a multi-bit subfield of flags2. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
	printk(KERN_ERR PFX "flags3 = %s%s.\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
	/* RSS match type is a multi-bit subfield of flags3. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
		       IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
		       IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
		       IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
		       IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
	printk(KERN_ERR PFX "data_len = %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	printk(KERN_ERR PFX "data_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "rss = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		printk(KERN_ERR PFX "vlan_id = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));
	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
	/* Header buffer info is only valid when header splitting fired. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		printk(KERN_ERR PFX "hdr length = %d.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		printk(KERN_ERR PFX "hdr addr = 0x%llx.\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
  1419. #endif
  1420. #ifdef QL_ALL_DUMP
/*
 * Dump everything: chip registers, adapter software state, and every
 * TX/RX ring together with its hardware control block, via the
 * QL_DUMP_* wrapper macros.
 */
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;

	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		/* Cast assumes the wqicb leads struct tx_ring -- see qlge.h. */
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		/* Cast assumes the cqicb leads struct rx_ring -- see qlge.h. */
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
  1435. #endif