qlge_dbg.c 68 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087
  1. #include "qlge.h"
  2. /* Read a NIC register from the alternate function. */
  3. static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
  4. u32 reg)
  5. {
  6. u32 register_to_read;
  7. u32 reg_val;
  8. unsigned int status = 0;
  9. register_to_read = MPI_NIC_REG_BLOCK
  10. | MPI_NIC_READ
  11. | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
  12. | reg;
  13. status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
  14. if (status != 0)
  15. return 0xffffffff;
  16. return reg_val;
  17. }
  18. /* Write a NIC register from the alternate function. */
  19. static int ql_write_other_func_reg(struct ql_adapter *qdev,
  20. u32 reg, u32 reg_val)
  21. {
  22. u32 register_to_read;
  23. int status = 0;
  24. register_to_read = MPI_NIC_REG_BLOCK
  25. | MPI_NIC_READ
  26. | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
  27. | reg;
  28. status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
  29. return status;
  30. }
  31. static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
  32. u32 bit, u32 err_bit)
  33. {
  34. u32 temp;
  35. int count = 10;
  36. while (count) {
  37. temp = ql_read_other_func_reg(qdev, reg);
  38. /* check for errors */
  39. if (temp & err_bit)
  40. return -1;
  41. else if (temp & bit)
  42. return 0;
  43. mdelay(10);
  44. count--;
  45. }
  46. return -1;
  47. }
  48. static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
  49. u32 *data)
  50. {
  51. int status;
  52. /* wait for reg to come ready */
  53. status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
  54. XG_SERDES_ADDR_RDY, 0);
  55. if (status)
  56. goto exit;
  57. /* set up for reg read */
  58. ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
  59. /* wait for reg to come ready */
  60. status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
  61. XG_SERDES_ADDR_RDY, 0);
  62. if (status)
  63. goto exit;
  64. /* get the data */
  65. *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
  66. exit:
  67. return status;
  68. }
  69. /* Read out the SERDES registers */
  70. static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
  71. {
  72. int status;
  73. /* wait for reg to come ready */
  74. status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
  75. if (status)
  76. goto exit;
  77. /* set up for reg read */
  78. ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
  79. /* wait for reg to come ready */
  80. status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
  81. if (status)
  82. goto exit;
  83. /* get the data */
  84. *data = ql_read32(qdev, XG_SERDES_DATA);
  85. exit:
  86. return status;
  87. }
  88. static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
  89. u32 *direct_ptr, u32 *indirect_ptr,
  90. unsigned int direct_valid, unsigned int indirect_valid)
  91. {
  92. unsigned int status;
  93. status = 1;
  94. if (direct_valid)
  95. status = ql_read_serdes_reg(qdev, addr, direct_ptr);
  96. /* Dead fill any failures or invalids. */
  97. if (status)
  98. *direct_ptr = 0xDEADBEEF;
  99. status = 1;
  100. if (indirect_valid)
  101. status = ql_read_other_func_serdes_reg(
  102. qdev, addr, indirect_ptr);
  103. /* Dead fill any failures or invalids. */
  104. if (status)
  105. *indirect_ptr = 0xDEADBEEF;
  106. }
  107. static int ql_get_serdes_regs(struct ql_adapter *qdev,
  108. struct ql_mpi_coredump *mpi_coredump)
  109. {
  110. int status;
  111. unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
  112. unsigned int xaui_indirect_valid, i;
  113. u32 *direct_ptr, temp;
  114. u32 *indirect_ptr;
  115. xfi_direct_valid = xfi_indirect_valid = 0;
  116. xaui_direct_valid = xaui_indirect_valid = 1;
  117. /* The XAUI needs to be read out per port */
  118. if (qdev->func & 1) {
  119. /* We are NIC 2 */
  120. status = ql_read_other_func_serdes_reg(qdev,
  121. XG_SERDES_XAUI_HSS_PCS_START, &temp);
  122. if (status)
  123. temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
  124. if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
  125. XG_SERDES_ADDR_XAUI_PWR_DOWN)
  126. xaui_indirect_valid = 0;
  127. status = ql_read_serdes_reg(qdev,
  128. XG_SERDES_XAUI_HSS_PCS_START, &temp);
  129. if (status)
  130. temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
  131. if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
  132. XG_SERDES_ADDR_XAUI_PWR_DOWN)
  133. xaui_direct_valid = 0;
  134. } else {
  135. /* We are NIC 1 */
  136. status = ql_read_other_func_serdes_reg(qdev,
  137. XG_SERDES_XAUI_HSS_PCS_START, &temp);
  138. if (status)
  139. temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
  140. if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
  141. XG_SERDES_ADDR_XAUI_PWR_DOWN)
  142. xaui_indirect_valid = 0;
  143. status = ql_read_serdes_reg(qdev,
  144. XG_SERDES_XAUI_HSS_PCS_START, &temp);
  145. if (status)
  146. temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
  147. if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
  148. XG_SERDES_ADDR_XAUI_PWR_DOWN)
  149. xaui_direct_valid = 0;
  150. }
  151. /*
  152. * XFI register is shared so only need to read one
  153. * functions and then check the bits.
  154. */
  155. status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
  156. if (status)
  157. temp = 0;
  158. if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
  159. XG_SERDES_ADDR_XFI1_PWR_UP) {
  160. /* now see if i'm NIC 1 or NIC 2 */
  161. if (qdev->func & 1)
  162. /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
  163. xfi_indirect_valid = 1;
  164. else
  165. xfi_direct_valid = 1;
  166. }
  167. if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
  168. XG_SERDES_ADDR_XFI2_PWR_UP) {
  169. /* now see if i'm NIC 1 or NIC 2 */
  170. if (qdev->func & 1)
  171. /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
  172. xfi_direct_valid = 1;
  173. else
  174. xfi_indirect_valid = 1;
  175. }
  176. /* Get XAUI_AN register block. */
  177. if (qdev->func & 1) {
  178. /* Function 2 is direct */
  179. direct_ptr = mpi_coredump->serdes2_xaui_an;
  180. indirect_ptr = mpi_coredump->serdes_xaui_an;
  181. } else {
  182. /* Function 1 is direct */
  183. direct_ptr = mpi_coredump->serdes_xaui_an;
  184. indirect_ptr = mpi_coredump->serdes2_xaui_an;
  185. }
  186. for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
  187. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  188. xaui_direct_valid, xaui_indirect_valid);
  189. /* Get XAUI_HSS_PCS register block. */
  190. if (qdev->func & 1) {
  191. direct_ptr =
  192. mpi_coredump->serdes2_xaui_hss_pcs;
  193. indirect_ptr =
  194. mpi_coredump->serdes_xaui_hss_pcs;
  195. } else {
  196. direct_ptr =
  197. mpi_coredump->serdes_xaui_hss_pcs;
  198. indirect_ptr =
  199. mpi_coredump->serdes2_xaui_hss_pcs;
  200. }
  201. for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
  202. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  203. xaui_direct_valid, xaui_indirect_valid);
  204. /* Get XAUI_XFI_AN register block. */
  205. if (qdev->func & 1) {
  206. direct_ptr = mpi_coredump->serdes2_xfi_an;
  207. indirect_ptr = mpi_coredump->serdes_xfi_an;
  208. } else {
  209. direct_ptr = mpi_coredump->serdes_xfi_an;
  210. indirect_ptr = mpi_coredump->serdes2_xfi_an;
  211. }
  212. for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
  213. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  214. xfi_direct_valid, xfi_indirect_valid);
  215. /* Get XAUI_XFI_TRAIN register block. */
  216. if (qdev->func & 1) {
  217. direct_ptr = mpi_coredump->serdes2_xfi_train;
  218. indirect_ptr =
  219. mpi_coredump->serdes_xfi_train;
  220. } else {
  221. direct_ptr = mpi_coredump->serdes_xfi_train;
  222. indirect_ptr =
  223. mpi_coredump->serdes2_xfi_train;
  224. }
  225. for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
  226. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  227. xfi_direct_valid, xfi_indirect_valid);
  228. /* Get XAUI_XFI_HSS_PCS register block. */
  229. if (qdev->func & 1) {
  230. direct_ptr =
  231. mpi_coredump->serdes2_xfi_hss_pcs;
  232. indirect_ptr =
  233. mpi_coredump->serdes_xfi_hss_pcs;
  234. } else {
  235. direct_ptr =
  236. mpi_coredump->serdes_xfi_hss_pcs;
  237. indirect_ptr =
  238. mpi_coredump->serdes2_xfi_hss_pcs;
  239. }
  240. for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
  241. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  242. xfi_direct_valid, xfi_indirect_valid);
  243. /* Get XAUI_XFI_HSS_TX register block. */
  244. if (qdev->func & 1) {
  245. direct_ptr =
  246. mpi_coredump->serdes2_xfi_hss_tx;
  247. indirect_ptr =
  248. mpi_coredump->serdes_xfi_hss_tx;
  249. } else {
  250. direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
  251. indirect_ptr =
  252. mpi_coredump->serdes2_xfi_hss_tx;
  253. }
  254. for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
  255. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  256. xfi_direct_valid, xfi_indirect_valid);
  257. /* Get XAUI_XFI_HSS_RX register block. */
  258. if (qdev->func & 1) {
  259. direct_ptr =
  260. mpi_coredump->serdes2_xfi_hss_rx;
  261. indirect_ptr =
  262. mpi_coredump->serdes_xfi_hss_rx;
  263. } else {
  264. direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
  265. indirect_ptr =
  266. mpi_coredump->serdes2_xfi_hss_rx;
  267. }
  268. for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
  269. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  270. xfi_direct_valid, xfi_indirect_valid);
  271. /* Get XAUI_XFI_HSS_PLL register block. */
  272. if (qdev->func & 1) {
  273. direct_ptr =
  274. mpi_coredump->serdes2_xfi_hss_pll;
  275. indirect_ptr =
  276. mpi_coredump->serdes_xfi_hss_pll;
  277. } else {
  278. direct_ptr =
  279. mpi_coredump->serdes_xfi_hss_pll;
  280. indirect_ptr =
  281. mpi_coredump->serdes2_xfi_hss_pll;
  282. }
  283. for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
  284. ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
  285. xfi_direct_valid, xfi_indirect_valid);
  286. return 0;
  287. }
  288. static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
  289. {
  290. int status = 0;
  291. int i;
  292. for (i = 0; i < 8; i++, buf++) {
  293. ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
  294. *buf = ql_read32(qdev, NIC_ETS);
  295. }
  296. for (i = 0; i < 2; i++, buf++) {
  297. ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
  298. *buf = ql_read32(qdev, CNA_ETS);
  299. }
  300. return status;
  301. }
  302. static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
  303. {
  304. int i;
  305. for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
  306. ql_write32(qdev, INTR_EN,
  307. qdev->intr_context[i].intr_read_mask);
  308. *buf = ql_read32(qdev, INTR_EN);
  309. }
  310. }
  311. static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
  312. {
  313. int i, status;
  314. u32 value[3];
  315. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  316. if (status)
  317. return status;
  318. for (i = 0; i < 16; i++) {
  319. status = ql_get_mac_addr_reg(qdev,
  320. MAC_ADDR_TYPE_CAM_MAC, i, value);
  321. if (status) {
  322. QPRINTK(qdev, DRV, ERR,
  323. "Failed read of mac index register.\n");
  324. goto err;
  325. }
  326. *buf++ = value[0]; /* lower MAC address */
  327. *buf++ = value[1]; /* upper MAC address */
  328. *buf++ = value[2]; /* output */
  329. }
  330. for (i = 0; i < 32; i++) {
  331. status = ql_get_mac_addr_reg(qdev,
  332. MAC_ADDR_TYPE_MULTI_MAC, i, value);
  333. if (status) {
  334. QPRINTK(qdev, DRV, ERR,
  335. "Failed read of mac index register.\n");
  336. goto err;
  337. }
  338. *buf++ = value[0]; /* lower Mcast address */
  339. *buf++ = value[1]; /* upper Mcast address */
  340. }
  341. err:
  342. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  343. return status;
  344. }
  345. static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
  346. {
  347. int status;
  348. u32 value, i;
  349. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  350. if (status)
  351. return status;
  352. for (i = 0; i < 16; i++) {
  353. status = ql_get_routing_reg(qdev, i, &value);
  354. if (status) {
  355. QPRINTK(qdev, DRV, ERR,
  356. "Failed read of routing index register.\n");
  357. goto err;
  358. } else {
  359. *buf++ = value;
  360. }
  361. }
  362. err:
  363. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  364. return status;
  365. }
  366. /* Read the MPI Processor shadow registers */
  367. static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
  368. {
  369. u32 i;
  370. int status;
  371. for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
  372. status = ql_write_mpi_reg(qdev, RISC_124,
  373. (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
  374. if (status)
  375. goto end;
  376. status = ql_read_mpi_reg(qdev, RISC_127, buf);
  377. if (status)
  378. goto end;
  379. }
  380. end:
  381. return status;
  382. }
  383. /* Read the MPI Processor core registers */
  384. static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
  385. u32 offset, u32 count)
  386. {
  387. int i, status = 0;
  388. for (i = 0; i < count; i++, buf++) {
  389. status = ql_read_mpi_reg(qdev, offset + i, buf);
  390. if (status)
  391. return status;
  392. }
  393. return status;
  394. }
  395. /* Read the ASIC probe dump */
  396. static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
  397. u32 valid, u32 *buf)
  398. {
  399. u32 module, mux_sel, probe, lo_val, hi_val;
  400. for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
  401. if (!((valid >> module) & 1))
  402. continue;
  403. for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
  404. probe = clock
  405. | PRB_MX_ADDR_ARE
  406. | mux_sel
  407. | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
  408. ql_write32(qdev, PRB_MX_ADDR, probe);
  409. lo_val = ql_read32(qdev, PRB_MX_DATA);
  410. if (mux_sel == 0) {
  411. *buf = probe;
  412. buf++;
  413. }
  414. probe |= PRB_MX_ADDR_UP;
  415. ql_write32(qdev, PRB_MX_ADDR, probe);
  416. hi_val = ql_read32(qdev, PRB_MX_DATA);
  417. *buf = lo_val;
  418. buf++;
  419. *buf = hi_val;
  420. buf++;
  421. }
  422. }
  423. return buf;
  424. }
  425. static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
  426. {
  427. /* First we have to enable the probe mux */
  428. ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
  429. buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
  430. PRB_MX_ADDR_VALID_SYS_MOD, buf);
  431. buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
  432. PRB_MX_ADDR_VALID_PCI_MOD, buf);
  433. buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
  434. PRB_MX_ADDR_VALID_XGM_MOD, buf);
  435. buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
  436. PRB_MX_ADDR_VALID_FC_MOD, buf);
  437. return 0;
  438. }
  439. /* Read out the routing index registers */
  440. static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
  441. {
  442. int status;
  443. u32 type, index, index_max;
  444. u32 result_index;
  445. u32 result_data;
  446. u32 val;
  447. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  448. if (status)
  449. return status;
  450. for (type = 0; type < 4; type++) {
  451. if (type < 2)
  452. index_max = 8;
  453. else
  454. index_max = 16;
  455. for (index = 0; index < index_max; index++) {
  456. val = RT_IDX_RS
  457. | (type << RT_IDX_TYPE_SHIFT)
  458. | (index << RT_IDX_IDX_SHIFT);
  459. ql_write32(qdev, RT_IDX, val);
  460. result_index = 0;
  461. while ((result_index & RT_IDX_MR) == 0)
  462. result_index = ql_read32(qdev, RT_IDX);
  463. result_data = ql_read32(qdev, RT_DATA);
  464. *buf = type;
  465. buf++;
  466. *buf = index;
  467. buf++;
  468. *buf = result_index;
  469. buf++;
  470. *buf = result_data;
  471. buf++;
  472. }
  473. }
  474. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  475. return status;
  476. }
  477. /* Read out the MAC protocol registers */
  478. static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
  479. {
  480. u32 result_index, result_data;
  481. u32 type;
  482. u32 index;
  483. u32 offset;
  484. u32 val;
  485. u32 initial_val = MAC_ADDR_RS;
  486. u32 max_index;
  487. u32 max_offset;
  488. for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
  489. switch (type) {
  490. case 0: /* CAM */
  491. initial_val |= MAC_ADDR_ADR;
  492. max_index = MAC_ADDR_MAX_CAM_ENTRIES;
  493. max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
  494. break;
  495. case 1: /* Multicast MAC Address */
  496. max_index = MAC_ADDR_MAX_CAM_WCOUNT;
  497. max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
  498. break;
  499. case 2: /* VLAN filter mask */
  500. case 3: /* MC filter mask */
  501. max_index = MAC_ADDR_MAX_CAM_WCOUNT;
  502. max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
  503. break;
  504. case 4: /* FC MAC addresses */
  505. max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
  506. max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
  507. break;
  508. case 5: /* Mgmt MAC addresses */
  509. max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
  510. max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
  511. break;
  512. case 6: /* Mgmt VLAN addresses */
  513. max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
  514. max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
  515. break;
  516. case 7: /* Mgmt IPv4 address */
  517. max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
  518. max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
  519. break;
  520. case 8: /* Mgmt IPv6 address */
  521. max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
  522. max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
  523. break;
  524. case 9: /* Mgmt TCP/UDP Dest port */
  525. max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
  526. max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
  527. break;
  528. default:
  529. printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
  530. max_index = 0;
  531. max_offset = 0;
  532. break;
  533. }
  534. for (index = 0; index < max_index; index++) {
  535. for (offset = 0; offset < max_offset; offset++) {
  536. val = initial_val
  537. | (type << MAC_ADDR_TYPE_SHIFT)
  538. | (index << MAC_ADDR_IDX_SHIFT)
  539. | (offset);
  540. ql_write32(qdev, MAC_ADDR_IDX, val);
  541. result_index = 0;
  542. while ((result_index & MAC_ADDR_MR) == 0) {
  543. result_index = ql_read32(qdev,
  544. MAC_ADDR_IDX);
  545. }
  546. result_data = ql_read32(qdev, MAC_ADDR_DATA);
  547. *buf = result_index;
  548. buf++;
  549. *buf = result_data;
  550. buf++;
  551. }
  552. }
  553. }
  554. }
  555. static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
  556. {
  557. u32 func_num, reg, reg_val;
  558. int status;
  559. for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
  560. reg = MPI_NIC_REG_BLOCK
  561. | (func_num << MPI_NIC_FUNCTION_SHIFT)
  562. | (SEM / 4);
  563. status = ql_read_mpi_reg(qdev, reg, &reg_val);
  564. *buf = reg_val;
  565. /* if the read failed then dead fill the element. */
  566. if (!status)
  567. *buf = 0xdeadbeef;
  568. buf++;
  569. }
  570. }
  571. /* Create a coredump segment header */
  572. static void ql_build_coredump_seg_header(
  573. struct mpi_coredump_segment_header *seg_hdr,
  574. u32 seg_number, u32 seg_size, u8 *desc)
  575. {
  576. memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
  577. seg_hdr->cookie = MPI_COREDUMP_COOKIE;
  578. seg_hdr->segNum = seg_number;
  579. seg_hdr->segSize = seg_size;
  580. memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
  581. }
  582. /*
  583. * This function should be called when a coredump / probedump
  584. * is to be extracted from the HBA. It is assumed there is a
  585. * qdev structure that contains the base address of the register
  586. * space for this function as well as a coredump structure that
  587. * will contain the dump.
  588. */
  589. int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
  590. {
  591. int status;
  592. int i;
  593. if (!mpi_coredump) {
  594. QPRINTK(qdev, DRV, ERR,
  595. "No memory available.\n");
  596. return -ENOMEM;
  597. }
  598. /* Try to get the spinlock, but dont worry if
  599. * it isn't available. If the firmware died it
  600. * might be holding the sem.
  601. */
  602. ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
  603. status = ql_pause_mpi_risc(qdev);
  604. if (status) {
  605. QPRINTK(qdev, DRV, ERR,
  606. "Failed RISC pause. Status = 0x%.08x\n", status);
  607. goto err;
  608. }
  609. /* Insert the global header */
  610. memset(&(mpi_coredump->mpi_global_header), 0,
  611. sizeof(struct mpi_coredump_global_header));
  612. mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
  613. mpi_coredump->mpi_global_header.headerSize =
  614. sizeof(struct mpi_coredump_global_header);
  615. mpi_coredump->mpi_global_header.imageSize =
  616. sizeof(struct ql_mpi_coredump);
  617. memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
  618. sizeof(mpi_coredump->mpi_global_header.idString));
  619. /* Get generic NIC reg dump */
  620. ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
  621. NIC1_CONTROL_SEG_NUM,
  622. sizeof(struct mpi_coredump_segment_header) +
  623. sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
  624. ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
  625. NIC2_CONTROL_SEG_NUM,
  626. sizeof(struct mpi_coredump_segment_header) +
  627. sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
  628. if (qdev->func & 1) {
  629. /* Odd means our function is NIC 2 */
  630. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  631. mpi_coredump->nic2_regs[i] =
  632. ql_read32(qdev, i * sizeof(u32));
  633. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  634. mpi_coredump->nic_regs[i] =
  635. ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
  636. } else {
  637. /* Even means our function is NIC 1 */
  638. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  639. mpi_coredump->nic_regs[i] =
  640. ql_read32(qdev, i * sizeof(u32));
  641. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  642. mpi_coredump->nic2_regs[i] =
  643. ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
  644. }
  645. /* Rev C. Step 20a */
  646. ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
  647. XAUI_AN_SEG_NUM,
  648. sizeof(struct mpi_coredump_segment_header) +
  649. sizeof(mpi_coredump->serdes_xaui_an),
  650. "XAUI AN Registers");
  651. /* Rev C. Step 20b */
  652. ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
  653. XAUI_HSS_PCS_SEG_NUM,
  654. sizeof(struct mpi_coredump_segment_header) +
  655. sizeof(mpi_coredump->serdes_xaui_hss_pcs),
  656. "XAUI HSS PCS Registers");
  657. ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
  658. sizeof(struct mpi_coredump_segment_header) +
  659. sizeof(mpi_coredump->serdes_xfi_an),
  660. "XFI AN Registers");
  661. ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
  662. XFI_TRAIN_SEG_NUM,
  663. sizeof(struct mpi_coredump_segment_header) +
  664. sizeof(mpi_coredump->serdes_xfi_train),
  665. "XFI TRAIN Registers");
  666. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
  667. XFI_HSS_PCS_SEG_NUM,
  668. sizeof(struct mpi_coredump_segment_header) +
  669. sizeof(mpi_coredump->serdes_xfi_hss_pcs),
  670. "XFI HSS PCS Registers");
  671. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
  672. XFI_HSS_TX_SEG_NUM,
  673. sizeof(struct mpi_coredump_segment_header) +
  674. sizeof(mpi_coredump->serdes_xfi_hss_tx),
  675. "XFI HSS TX Registers");
  676. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
  677. XFI_HSS_RX_SEG_NUM,
  678. sizeof(struct mpi_coredump_segment_header) +
  679. sizeof(mpi_coredump->serdes_xfi_hss_rx),
  680. "XFI HSS RX Registers");
  681. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
  682. XFI_HSS_PLL_SEG_NUM,
  683. sizeof(struct mpi_coredump_segment_header) +
  684. sizeof(mpi_coredump->serdes_xfi_hss_pll),
  685. "XFI HSS PLL Registers");
  686. ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
  687. XAUI2_AN_SEG_NUM,
  688. sizeof(struct mpi_coredump_segment_header) +
  689. sizeof(mpi_coredump->serdes2_xaui_an),
  690. "XAUI2 AN Registers");
  691. ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
  692. XAUI2_HSS_PCS_SEG_NUM,
  693. sizeof(struct mpi_coredump_segment_header) +
  694. sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
  695. "XAUI2 HSS PCS Registers");
  696. ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
  697. XFI2_AN_SEG_NUM,
  698. sizeof(struct mpi_coredump_segment_header) +
  699. sizeof(mpi_coredump->serdes2_xfi_an),
  700. "XFI2 AN Registers");
  701. ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
  702. XFI2_TRAIN_SEG_NUM,
  703. sizeof(struct mpi_coredump_segment_header) +
  704. sizeof(mpi_coredump->serdes2_xfi_train),
  705. "XFI2 TRAIN Registers");
  706. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
  707. XFI2_HSS_PCS_SEG_NUM,
  708. sizeof(struct mpi_coredump_segment_header) +
  709. sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
  710. "XFI2 HSS PCS Registers");
  711. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
  712. XFI2_HSS_TX_SEG_NUM,
  713. sizeof(struct mpi_coredump_segment_header) +
  714. sizeof(mpi_coredump->serdes2_xfi_hss_tx),
  715. "XFI2 HSS TX Registers");
  716. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
  717. XFI2_HSS_RX_SEG_NUM,
  718. sizeof(struct mpi_coredump_segment_header) +
  719. sizeof(mpi_coredump->serdes2_xfi_hss_rx),
  720. "XFI2 HSS RX Registers");
  721. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
  722. XFI2_HSS_PLL_SEG_NUM,
  723. sizeof(struct mpi_coredump_segment_header) +
  724. sizeof(mpi_coredump->serdes2_xfi_hss_pll),
  725. "XFI2 HSS PLL Registers");
  726. status = ql_get_serdes_regs(qdev, mpi_coredump);
  727. if (status) {
  728. QPRINTK(qdev, DRV, ERR,
  729. "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
  730. status);
  731. goto err;
  732. }
  733. ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
  734. CORE_SEG_NUM,
  735. sizeof(mpi_coredump->core_regs_seg_hdr) +
  736. sizeof(mpi_coredump->mpi_core_regs) +
  737. sizeof(mpi_coredump->mpi_core_sh_regs),
  738. "Core Registers");
  739. /* Get the MPI Core Registers */
  740. status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
  741. MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
  742. if (status)
  743. goto err;
  744. /* Get the 16 MPI shadow registers */
  745. status = ql_get_mpi_shadow_regs(qdev,
  746. &mpi_coredump->mpi_core_sh_regs[0]);
  747. if (status)
  748. goto err;
  749. /* Get the Test Logic Registers */
  750. ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
  751. TEST_LOGIC_SEG_NUM,
  752. sizeof(struct mpi_coredump_segment_header)
  753. + sizeof(mpi_coredump->test_logic_regs),
  754. "Test Logic Regs");
  755. status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
  756. TEST_REGS_ADDR, TEST_REGS_CNT);
  757. if (status)
  758. goto err;
  759. /* Get the RMII Registers */
  760. ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
  761. RMII_SEG_NUM,
  762. sizeof(struct mpi_coredump_segment_header)
  763. + sizeof(mpi_coredump->rmii_regs),
  764. "RMII Registers");
  765. status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
  766. RMII_REGS_ADDR, RMII_REGS_CNT);
  767. if (status)
  768. goto err;
  769. /* Get the FCMAC1 Registers */
  770. ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
  771. FCMAC1_SEG_NUM,
  772. sizeof(struct mpi_coredump_segment_header)
  773. + sizeof(mpi_coredump->fcmac1_regs),
  774. "FCMAC1 Registers");
  775. status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
  776. FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
  777. if (status)
  778. goto err;
  779. /* Get the FCMAC2 Registers */
  780. ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
  781. FCMAC2_SEG_NUM,
  782. sizeof(struct mpi_coredump_segment_header)
  783. + sizeof(mpi_coredump->fcmac2_regs),
  784. "FCMAC2 Registers");
  785. status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
  786. FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
  787. if (status)
  788. goto err;
  789. /* Get the FC1 MBX Registers */
  790. ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
  791. FC1_MBOX_SEG_NUM,
  792. sizeof(struct mpi_coredump_segment_header)
  793. + sizeof(mpi_coredump->fc1_mbx_regs),
  794. "FC1 MBox Regs");
  795. status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
  796. FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
  797. if (status)
  798. goto err;
  799. /* Get the IDE Registers */
  800. ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
  801. IDE_SEG_NUM,
  802. sizeof(struct mpi_coredump_segment_header)
  803. + sizeof(mpi_coredump->ide_regs),
  804. "IDE Registers");
  805. status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
  806. IDE_REGS_ADDR, IDE_REGS_CNT);
  807. if (status)
  808. goto err;
  809. /* Get the NIC1 MBX Registers */
  810. ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
  811. NIC1_MBOX_SEG_NUM,
  812. sizeof(struct mpi_coredump_segment_header)
  813. + sizeof(mpi_coredump->nic1_mbx_regs),
  814. "NIC1 MBox Regs");
  815. status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
  816. NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
  817. if (status)
  818. goto err;
  819. /* Get the SMBus Registers */
  820. ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
  821. SMBUS_SEG_NUM,
  822. sizeof(struct mpi_coredump_segment_header)
  823. + sizeof(mpi_coredump->smbus_regs),
  824. "SMBus Registers");
  825. status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
  826. SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
  827. if (status)
  828. goto err;
  829. /* Get the FC2 MBX Registers */
  830. ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
  831. FC2_MBOX_SEG_NUM,
  832. sizeof(struct mpi_coredump_segment_header)
  833. + sizeof(mpi_coredump->fc2_mbx_regs),
  834. "FC2 MBox Regs");
  835. status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
  836. FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
  837. if (status)
  838. goto err;
  839. /* Get the NIC2 MBX Registers */
  840. ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
  841. NIC2_MBOX_SEG_NUM,
  842. sizeof(struct mpi_coredump_segment_header)
  843. + sizeof(mpi_coredump->nic2_mbx_regs),
  844. "NIC2 MBox Regs");
  845. status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
  846. NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
  847. if (status)
  848. goto err;
  849. /* Get the I2C Registers */
  850. ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
  851. I2C_SEG_NUM,
  852. sizeof(struct mpi_coredump_segment_header)
  853. + sizeof(mpi_coredump->i2c_regs),
  854. "I2C Registers");
  855. status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
  856. I2C_REGS_ADDR, I2C_REGS_CNT);
  857. if (status)
  858. goto err;
  859. /* Get the MEMC Registers */
  860. ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
  861. MEMC_SEG_NUM,
  862. sizeof(struct mpi_coredump_segment_header)
  863. + sizeof(mpi_coredump->memc_regs),
  864. "MEMC Registers");
  865. status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
  866. MEMC_REGS_ADDR, MEMC_REGS_CNT);
  867. if (status)
  868. goto err;
  869. /* Get the PBus Registers */
  870. ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
  871. PBUS_SEG_NUM,
  872. sizeof(struct mpi_coredump_segment_header)
  873. + sizeof(mpi_coredump->pbus_regs),
  874. "PBUS Registers");
  875. status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
  876. PBUS_REGS_ADDR, PBUS_REGS_CNT);
  877. if (status)
  878. goto err;
  879. /* Get the MDE Registers */
  880. ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
  881. MDE_SEG_NUM,
  882. sizeof(struct mpi_coredump_segment_header)
  883. + sizeof(mpi_coredump->mde_regs),
  884. "MDE Registers");
  885. status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
  886. MDE_REGS_ADDR, MDE_REGS_CNT);
  887. if (status)
  888. goto err;
  889. ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
  890. MISC_NIC_INFO_SEG_NUM,
  891. sizeof(struct mpi_coredump_segment_header)
  892. + sizeof(mpi_coredump->misc_nic_info),
  893. "MISC NIC INFO");
  894. mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
  895. mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
  896. mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
  897. mpi_coredump->misc_nic_info.function = qdev->func;
  898. /* Segment 31 */
  899. /* Get indexed register values. */
  900. ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
  901. INTR_STATES_SEG_NUM,
  902. sizeof(struct mpi_coredump_segment_header)
  903. + sizeof(mpi_coredump->intr_states),
  904. "INTR States");
  905. ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
  906. ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
  907. CAM_ENTRIES_SEG_NUM,
  908. sizeof(struct mpi_coredump_segment_header)
  909. + sizeof(mpi_coredump->cam_entries),
  910. "CAM Entries");
  911. status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
  912. if (status)
  913. goto err;
  914. ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
  915. ROUTING_WORDS_SEG_NUM,
  916. sizeof(struct mpi_coredump_segment_header)
  917. + sizeof(mpi_coredump->nic_routing_words),
  918. "Routing Words");
  919. status = ql_get_routing_entries(qdev,
  920. &mpi_coredump->nic_routing_words[0]);
  921. if (status)
  922. goto err;
  923. /* Segment 34 (Rev C. step 23) */
  924. ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
  925. ETS_SEG_NUM,
  926. sizeof(struct mpi_coredump_segment_header)
  927. + sizeof(mpi_coredump->ets),
  928. "ETS Registers");
  929. status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
  930. if (status)
  931. goto err;
  932. ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
  933. PROBE_DUMP_SEG_NUM,
  934. sizeof(struct mpi_coredump_segment_header)
  935. + sizeof(mpi_coredump->probe_dump),
  936. "Probe Dump");
  937. ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
  938. ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
  939. ROUTING_INDEX_SEG_NUM,
  940. sizeof(struct mpi_coredump_segment_header)
  941. + sizeof(mpi_coredump->routing_regs),
  942. "Routing Regs");
  943. status = ql_get_routing_index_registers(qdev,
  944. &mpi_coredump->routing_regs[0]);
  945. if (status)
  946. goto err;
  947. ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
  948. MAC_PROTOCOL_SEG_NUM,
  949. sizeof(struct mpi_coredump_segment_header)
  950. + sizeof(mpi_coredump->mac_prot_regs),
  951. "MAC Prot Regs");
  952. ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
  953. /* Get the semaphore registers for all 5 functions */
  954. ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
  955. SEM_REGS_SEG_NUM,
  956. sizeof(struct mpi_coredump_segment_header) +
  957. sizeof(mpi_coredump->sem_regs), "Sem Registers");
  958. ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
  959. /* Prevent the mpi restarting while we dump the memory.*/
  960. ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
  961. /* clear the pause */
  962. status = ql_unpause_mpi_risc(qdev);
  963. if (status) {
  964. QPRINTK(qdev, DRV, ERR,
  965. "Failed RISC unpause. Status = 0x%.08x\n", status);
  966. goto err;
  967. }
  968. /* Reset the RISC so we can dump RAM */
  969. status = ql_hard_reset_mpi_risc(qdev);
  970. if (status) {
  971. QPRINTK(qdev, DRV, ERR,
  972. "Failed RISC reset. Status = 0x%.08x\n", status);
  973. goto err;
  974. }
  975. ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
  976. WCS_RAM_SEG_NUM,
  977. sizeof(struct mpi_coredump_segment_header)
  978. + sizeof(mpi_coredump->code_ram),
  979. "WCS RAM");
  980. status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
  981. CODE_RAM_ADDR, CODE_RAM_CNT);
  982. if (status) {
  983. QPRINTK(qdev, DRV, ERR,
  984. "Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
  985. goto err;
  986. }
  987. /* Insert the segment header */
  988. ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
  989. MEMC_RAM_SEG_NUM,
  990. sizeof(struct mpi_coredump_segment_header)
  991. + sizeof(mpi_coredump->memc_ram),
  992. "MEMC RAM");
  993. status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
  994. MEMC_RAM_ADDR, MEMC_RAM_CNT);
  995. if (status) {
  996. QPRINTK(qdev, DRV, ERR,
  997. "Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
  998. goto err;
  999. }
  1000. err:
  1001. ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
  1002. return status;
  1003. }
  1004. void ql_gen_reg_dump(struct ql_adapter *qdev,
  1005. struct ql_reg_dump *mpi_coredump)
  1006. {
  1007. int i, status;
  1008. memset(&(mpi_coredump->mpi_global_header), 0,
  1009. sizeof(struct mpi_coredump_global_header));
  1010. mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
  1011. mpi_coredump->mpi_global_header.headerSize =
  1012. sizeof(struct mpi_coredump_global_header);
  1013. mpi_coredump->mpi_global_header.imageSize =
  1014. sizeof(struct ql_reg_dump);
  1015. memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
  1016. sizeof(mpi_coredump->mpi_global_header.idString));
  1017. /* segment 16 */
  1018. ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
  1019. MISC_NIC_INFO_SEG_NUM,
  1020. sizeof(struct mpi_coredump_segment_header)
  1021. + sizeof(mpi_coredump->misc_nic_info),
  1022. "MISC NIC INFO");
  1023. mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
  1024. mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
  1025. mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
  1026. mpi_coredump->misc_nic_info.function = qdev->func;
  1027. /* Segment 16, Rev C. Step 18 */
  1028. ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
  1029. NIC1_CONTROL_SEG_NUM,
  1030. sizeof(struct mpi_coredump_segment_header)
  1031. + sizeof(mpi_coredump->nic_regs),
  1032. "NIC Registers");
  1033. /* Get generic reg dump */
  1034. for (i = 0; i < 64; i++)
  1035. mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
  1036. /* Segment 31 */
  1037. /* Get indexed register values. */
  1038. ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
  1039. INTR_STATES_SEG_NUM,
  1040. sizeof(struct mpi_coredump_segment_header)
  1041. + sizeof(mpi_coredump->intr_states),
  1042. "INTR States");
  1043. ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
  1044. ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
  1045. CAM_ENTRIES_SEG_NUM,
  1046. sizeof(struct mpi_coredump_segment_header)
  1047. + sizeof(mpi_coredump->cam_entries),
  1048. "CAM Entries");
  1049. status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
  1050. if (status)
  1051. return;
  1052. ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
  1053. ROUTING_WORDS_SEG_NUM,
  1054. sizeof(struct mpi_coredump_segment_header)
  1055. + sizeof(mpi_coredump->nic_routing_words),
  1056. "Routing Words");
  1057. status = ql_get_routing_entries(qdev,
  1058. &mpi_coredump->nic_routing_words[0]);
  1059. if (status)
  1060. return;
  1061. /* Segment 34 (Rev C. step 23) */
  1062. ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
  1063. ETS_SEG_NUM,
  1064. sizeof(struct mpi_coredump_segment_header)
  1065. + sizeof(mpi_coredump->ets),
  1066. "ETS Registers");
  1067. status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
  1068. if (status)
  1069. return;
  1070. }
  1071. /* Coredump to messages log file using separate worker thread */
  1072. void ql_mpi_core_to_log(struct work_struct *work)
  1073. {
  1074. struct ql_adapter *qdev =
  1075. container_of(work, struct ql_adapter, mpi_core_to_log.work);
  1076. u32 *tmp, count;
  1077. int i;
  1078. count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
  1079. tmp = (u32 *)qdev->mpi_coredump;
  1080. QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
  1081. for (i = 0; i < count; i += 8) {
  1082. printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
  1083. "%.08x %.08x %.08x \n", i,
  1084. tmp[i + 0],
  1085. tmp[i + 1],
  1086. tmp[i + 2],
  1087. tmp[i + 3],
  1088. tmp[i + 4],
  1089. tmp[i + 5],
  1090. tmp[i + 6],
  1091. tmp[i + 7]);
  1092. msleep(5);
  1093. }
  1094. }
  1095. #ifdef QL_REG_DUMP
  1096. static void ql_dump_intr_states(struct ql_adapter *qdev)
  1097. {
  1098. int i;
  1099. u32 value;
  1100. for (i = 0; i < qdev->intr_count; i++) {
  1101. ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
  1102. value = ql_read32(qdev, INTR_EN);
  1103. printk(KERN_ERR PFX
  1104. "%s: Interrupt %d is %s.\n",
  1105. qdev->ndev->name, i,
  1106. (value & INTR_EN_EN ? "enabled" : "disabled"));
  1107. }
  1108. }
  1109. void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
  1110. {
  1111. u32 data;
  1112. if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
  1113. printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
  1114. return;
  1115. }
  1116. ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
  1117. printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
  1118. data);
  1119. ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
  1120. printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
  1121. data);
  1122. ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
  1123. printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
  1124. data);
  1125. ql_read_xgmac_reg(qdev, TX_CFG, &data);
  1126. printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
  1127. ql_read_xgmac_reg(qdev, RX_CFG, &data);
  1128. printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
  1129. ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
  1130. printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
  1131. data);
  1132. ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
  1133. printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
  1134. data);
  1135. ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
  1136. printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
  1137. data);
  1138. ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
  1139. printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
  1140. qdev->ndev->name, data);
  1141. ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
  1142. printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
  1143. qdev->ndev->name, data);
  1144. ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
  1145. printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
  1146. data);
  1147. ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
  1148. printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
  1149. data);
  1150. ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
  1151. printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
  1152. data);
  1153. ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
  1154. printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
  1155. qdev->ndev->name, data);
  1156. ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
  1157. printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
  1158. data);
  1159. ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
  1160. printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
  1161. qdev->ndev->name, data);
  1162. ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
  1163. printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
  1164. data);
  1165. ql_sem_unlock(qdev, qdev->xg_sem_mask);
  1166. }
  1167. static void ql_dump_ets_regs(struct ql_adapter *qdev)
  1168. {
  1169. }
  1170. static void ql_dump_cam_entries(struct ql_adapter *qdev)
  1171. {
  1172. int i;
  1173. u32 value[3];
  1174. i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  1175. if (i)
  1176. return;
  1177. for (i = 0; i < 4; i++) {
  1178. if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
  1179. printk(KERN_ERR PFX
  1180. "%s: Failed read of mac index register.\n",
  1181. __func__);
  1182. return;
  1183. } else {
  1184. if (value[0])
  1185. printk(KERN_ERR PFX
  1186. "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
  1187. qdev->ndev->name, i, value[1], value[0],
  1188. value[2]);
  1189. }
  1190. }
  1191. for (i = 0; i < 32; i++) {
  1192. if (ql_get_mac_addr_reg
  1193. (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
  1194. printk(KERN_ERR PFX
  1195. "%s: Failed read of mac index register.\n",
  1196. __func__);
  1197. return;
  1198. } else {
  1199. if (value[0])
  1200. printk(KERN_ERR PFX
  1201. "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
  1202. qdev->ndev->name, i, value[1], value[0]);
  1203. }
  1204. }
  1205. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  1206. }
  1207. void ql_dump_routing_entries(struct ql_adapter *qdev)
  1208. {
  1209. int i;
  1210. u32 value;
  1211. i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  1212. if (i)
  1213. return;
  1214. for (i = 0; i < 16; i++) {
  1215. value = 0;
  1216. if (ql_get_routing_reg(qdev, i, &value)) {
  1217. printk(KERN_ERR PFX
  1218. "%s: Failed read of routing index register.\n",
  1219. __func__);
  1220. return;
  1221. } else {
  1222. if (value)
  1223. printk(KERN_ERR PFX
  1224. "%s: Routing Mask %d = 0x%.08x.\n",
  1225. qdev->ndev->name, i, value);
  1226. }
  1227. }
  1228. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  1229. }
  1230. void ql_dump_regs(struct ql_adapter *qdev)
  1231. {
  1232. printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
  1233. printk(KERN_ERR PFX "SYS = 0x%x.\n",
  1234. ql_read32(qdev, SYS));
	printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
		ql_read32(qdev, RST_FO));
	printk(KERN_ERR PFX "FSC = 0x%x.\n",
		ql_read32(qdev, FSC));
	printk(KERN_ERR PFX "CSR = 0x%x.\n",
		ql_read32(qdev, CSR));
	printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
		ql_read32(qdev, ICB_RID));
	printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
		ql_read32(qdev, ICB_L));
	printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
		ql_read32(qdev, ICB_H));
	printk(KERN_ERR PFX "CFG = 0x%x.\n",
		ql_read32(qdev, CFG));
	printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
		ql_read32(qdev, BIOS_ADDR));
	printk(KERN_ERR PFX "STS = 0x%x.\n",
		ql_read32(qdev, STS));
	printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
		ql_read32(qdev, INTR_EN));
	printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
		ql_read32(qdev, INTR_MASK));
	printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
		ql_read32(qdev, ISR1));
	printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
		ql_read32(qdev, ISR2));
	printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
		ql_read32(qdev, ISR3));
	printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
		ql_read32(qdev, ISR4));
	printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
		ql_read32(qdev, REV_ID));
	printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
		ql_read32(qdev, FRC_ECC_ERR));
	printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
		ql_read32(qdev, ERR_STS));
	printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
		ql_read32(qdev, RAM_DBG_ADDR));
	printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
		ql_read32(qdev, RAM_DBG_DATA));
	printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
		ql_read32(qdev, ECC_ERR_CNT));
	printk(KERN_ERR PFX "SEM = 0x%x.\n",
		ql_read32(qdev, SEM));
	printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
		ql_read32(qdev, GPIO_1));
	printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
		ql_read32(qdev, GPIO_2));
	printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
		ql_read32(qdev, GPIO_3));
	printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
		ql_read32(qdev, XGMAC_ADDR));
	printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
		ql_read32(qdev, XGMAC_DATA));
	printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
		ql_read32(qdev, NIC_ETS));
	printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
		ql_read32(qdev, CNA_ETS));
	printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
		ql_read32(qdev, FLASH_ADDR));
	printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
		ql_read32(qdev, FLASH_DATA));
	printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
		ql_read32(qdev, CQ_STOP));
	printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
		ql_read32(qdev, PAGE_TBL_RID));
	printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
		ql_read32(qdev, WQ_PAGE_TBL_LO));
	printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
		ql_read32(qdev, WQ_PAGE_TBL_HI));
	printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
		ql_read32(qdev, CQ_PAGE_TBL_LO));
	printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
		ql_read32(qdev, CQ_PAGE_TBL_HI));
	printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
		ql_read32(qdev, COS_DFLT_CQ1));
	printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
		ql_read32(qdev, COS_DFLT_CQ2));
	printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
		ql_read32(qdev, SPLT_HDR));
	printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
		ql_read32(qdev, FC_PAUSE_THRES));
	printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
		ql_read32(qdev, NIC_PAUSE_THRES));
	printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
		ql_read32(qdev, FC_ETHERTYPE));
	printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
		ql_read32(qdev, FC_RCV_CFG));
	printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
		ql_read32(qdev, NIC_RCV_CFG));
	printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
		ql_read32(qdev, FC_COS_TAGS));
	printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
		ql_read32(qdev, NIC_COS_TAGS));
	printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
		ql_read32(qdev, MGMT_RCV_CFG));
	printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
		ql_read32(qdev, XG_SERDES_ADDR));
	printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
		ql_read32(qdev, XG_SERDES_DATA));
	printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
		ql_read32(qdev, PRB_MX_ADDR));
	printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
		ql_read32(qdev, PRB_MX_DATA));
	ql_dump_intr_states(qdev);
	ql_dump_xgmac_control_regs(qdev);
	ql_dump_ets_regs(qdev);
	ql_dump_cam_entries(qdev);
	ql_dump_routing_entries(qdev);
}
#endif

#ifdef QL_STAT_DUMP
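/* Print the adapter's cached XGMAC statistics counters (qdev->nic_stats). */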
void ql_dump_stat(struct ql_adapter *qdev)
{
	printk(KERN_ERR "%s: Enter.\n", __func__);
	printk(KERN_ERR "tx_pkts = %ld\n",
		(unsigned long)qdev->nic_stats.tx_pkts);
	printk(KERN_ERR "tx_bytes = %ld\n",
		(unsigned long)qdev->nic_stats.tx_bytes);
	printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_mcast_pkts);
	printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_bcast_pkts);
	printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_ucast_pkts);
	printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_ctl_pkts);
	printk(KERN_ERR "tx_pause_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_pause_pkts);
	printk(KERN_ERR "tx_64_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_64_pkt);
	printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
	printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
	printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_256_511_pkt);
	printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
	printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
	printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
	printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_undersize_pkt);
	printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
		(unsigned long)qdev->nic_stats.tx_oversize_pkt);
	printk(KERN_ERR "rx_bytes = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_bytes);
	printk(KERN_ERR "rx_bytes_ok = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_bytes_ok);
	printk(KERN_ERR "rx_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_pkts);
	printk(KERN_ERR "rx_pkts_ok = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_pkts_ok);
	printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_bcast_pkts);
	printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_mcast_pkts);
	printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_ucast_pkts);
	printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_undersize_pkts);
	printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_oversize_pkts);
	printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_jabber_pkts);
	printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
	printk(KERN_ERR "rx_drop_events = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_drop_events);
	printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_fcerr_pkts);
	printk(KERN_ERR "rx_align_err = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_align_err);
	printk(KERN_ERR "rx_symbol_err = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_symbol_err);
	printk(KERN_ERR "rx_mac_err = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_mac_err);
	printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_ctl_pkts);
	printk(KERN_ERR "rx_pause_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_pause_pkts);
	printk(KERN_ERR "rx_64_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_64_pkts);
	printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
	printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_128_255_pkts);
	printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_256_511_pkts);
	printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
	printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
	printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
	printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
		(unsigned long)qdev->nic_stats.rx_len_err_pkts);
}
#endif

#ifdef QL_DEV_DUMP
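/*
 * Dump the driver's per-adapter state: device pointers, shadow register
 * areas, MSI-X vectors, interrupt contexts and ring bookkeeping.
 */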
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;

	printk(KERN_ERR PFX "qdev->flags = %lx.\n",
		qdev->flags);
	printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
		qdev->vlgrp);
	printk(KERN_ERR PFX "qdev->pdev = %p.\n",
		qdev->pdev);
	printk(KERN_ERR PFX "qdev->ndev = %p.\n",
		qdev->ndev);
	printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
		qdev->chip_rev_id);
	printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
		qdev->reg_base);
	printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
		qdev->doorbell_area);
	printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
		qdev->doorbell_area_size);
	printk(KERN_ERR PFX "msg_enable = %x.\n",
		qdev->msg_enable);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
		qdev->rx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n",
		(unsigned long long) qdev->rx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
		qdev->tx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n",
		(unsigned long long) qdev->tx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
		qdev->intr_count);
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			printk(KERN_ERR PFX
				"msi_x_entry.[%d]vector = %d.\n", i,
				qdev->msi_x_entry[i].vector);
			printk(KERN_ERR PFX
				"msi_x_entry.[%d]entry = %d.\n", i,
				qdev->msi_x_entry[i].entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		printk(KERN_ERR PFX
			"intr_context[%d].qdev = %p.\n", i,
			qdev->intr_context[i].qdev);
		printk(KERN_ERR PFX
			"intr_context[%d].intr = %d.\n", i,
			qdev->intr_context[i].intr);
		printk(KERN_ERR PFX
			"intr_context[%d].hooked = %d.\n", i,
			qdev->intr_context[i].hooked);
		printk(KERN_ERR PFX
			"intr_context[%d].intr_en_mask = 0x%08x.\n", i,
			qdev->intr_context[i].intr_en_mask);
		printk(KERN_ERR PFX
			"intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
			qdev->intr_context[i].intr_dis_mask);
		printk(KERN_ERR PFX
			"intr_context[%d].intr_read_mask = 0x%08x.\n", i,
			qdev->intr_context[i].intr_read_mask);
	}
	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
	printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
	printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
		qdev->tx_ring);
	printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
		qdev->rss_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
	printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
		qdev->default_rx_queue);
	printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
		qdev->xg_sem_mask);
	printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
		qdev->port_link_up);
	printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
		qdev->port_init);
}
#endif

#ifdef QL_CB_DUMP
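/* Dump a work queue initialization control block (WQICB). */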
void ql_dump_wqicb(struct wqicb *wqicb)
{
	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
		le16_to_cpu(wqicb->cq_id_rss));
	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
		(unsigned long long) le64_to_cpu(wqicb->addr));
	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
		(unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}
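
/* Dump the software state of a single TX (work queue) ring. */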
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	printk(KERN_ERR PFX
		"===================== Dumping tx_ring %d ===============.\n",
		tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
		(unsigned long long) tx_ring->wq_base_dma);
	printk(KERN_ERR PFX
		"tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
		tx_ring->cnsmr_idx_sh_reg,
		tx_ring->cnsmr_idx_sh_reg
			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
		tx_ring->prod_idx_db_reg);
	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
		tx_ring->valid_db_reg);
	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
		atomic_read(&tx_ring->tx_count));
}
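
/*
 * Dump an RSS initialization control block (RICB): flags, hash mask
 * and IPv4/IPv6 hash keys.
 */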
void ql_dump_ricb(struct ricb *ricb)
{
	int i;

	printk(KERN_ERR PFX
		"===================== Dumping ricb ===============.\n");
	printk(KERN_ERR PFX "Dumping ricb stuff...\n");
	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
		ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
		ricb->flags & RSS_L6K ? "RSS_L6K " : "",
		ricb->flags & RSS_LI ? "RSS_LI " : "",
		ricb->flags & RSS_LB ? "RSS_LB " : "",
		ricb->flags & RSS_LM ? "RSS_LM " : "",
		ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
		ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
		ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
		ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
			le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
			le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
			le32_to_cpu(ricb->ipv4_hash_key[i]));
}
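
/* Dump a completion queue initialization control block (CQICB). */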
void ql_dump_cqicb(struct cqicb *cqicb)
{
	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
		(unsigned long long) le64_to_cpu(cqicb->addr));
	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
		(unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
		le16_to_cpu(cqicb->pkt_delay));
	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
		le16_to_cpu(cqicb->irq_delay));
	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
		(unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
		le16_to_cpu(cqicb->lbq_buf_size));
	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
		le16_to_cpu(cqicb->lbq_len));
	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
		(unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
		le16_to_cpu(cqicb->sbq_buf_size));
	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
		le16_to_cpu(cqicb->sbq_len));
}
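
/*
 * Dump the software state of a single RX (completion queue) ring,
 * including its large and small buffer queues.
 */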
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	printk(KERN_ERR PFX
		"===================== Dumping rx_ring %d ===============.\n",
		rx_ring->cq_id);
	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
		rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
		rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
		rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
		(unsigned long long) rx_ring->cq_base_dma);
	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
	printk(KERN_ERR PFX
		"rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
		rx_ring->prod_idx_sh_reg,
		rx_ring->prod_idx_sh_reg
			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
		(unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
		rx_ring->cnsmr_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
		rx_ring->valid_db_reg);
	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
		(unsigned long long) rx_ring->lbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
		rx_ring->lbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
		(unsigned long long) rx_ring->lbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
		rx_ring->lbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
		rx_ring->lbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
		rx_ring->lbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
		rx_ring->lbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
		rx_ring->lbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
		rx_ring->lbq_buf_size);
	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
		(unsigned long long) rx_ring->sbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
		rx_ring->sbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
		(unsigned long long) rx_ring->sbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
		rx_ring->sbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
		rx_ring->sbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
		rx_ring->sbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
		rx_ring->sbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
		rx_ring->sbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
		rx_ring->sbq_buf_size);
	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
}
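
/*
 * Have the chip DMA a control block (selected by the CFG_D* dump bit
 * and queue id) into a temporary buffer and print it.
 */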
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *ptr;

	printk(KERN_ERR PFX "%s: Enter.\n", __func__);
	ptr = kmalloc(size, GFP_ATOMIC);
	if (ptr == NULL) {
		printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
			__func__);
		return;
	}
	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
		printk(KERN_ERR "%s: Failed to upload control block!\n",
			__func__);
		goto fail_it;
	}
	switch (bit) {
	case CFG_DRQ:
		ql_dump_wqicb((struct wqicb *)ptr);
		break;
	case CFG_DCQ:
		ql_dump_cqicb((struct cqicb *)ptr);
		break;
	case CFG_DR:
		ql_dump_ricb((struct ricb *)ptr);
		break;
	default:
		printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
			__func__, bit);
		break;
	}
fail_it:
	kfree(ptr);
}
#endif

#ifdef QL_OB_DUMP
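/* Dump the first three buffer descriptors of an outbound IOCB. */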
void ql_dump_tx_desc(struct tx_buf_desc *tbd)
{
	int i;

	for (i = 0; i < 3; i++, tbd++) {
		printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
			le64_to_cpu((u64) tbd->addr));
		printk(KERN_ERR PFX "tbd->len = %d\n",
			le32_to_cpu(tbd->len) & TX_DESC_LEN_MASK);
		printk(KERN_ERR PFX "tbd->flags = %s %s\n",
			le32_to_cpu(tbd->len) & TX_DESC_C ? "C" : ".",
			le32_to_cpu(tbd->len) & TX_DESC_E ? "E" : ".");
	}
}
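
/* Dump an outbound MAC/TSO send IOCB and its buffer descriptors. */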
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
{
	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
		(struct ob_mac_tso_iocb_req *)ob_mac_iocb;
	struct tx_buf_desc *tbd;
	u16 frame_len;

	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = %s\n",
		(ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
	printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
	printk(KERN_ERR PFX "flags2 = %s %s %s\n",
		ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
		ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
		ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
	printk(KERN_ERR PFX "flags3 = %s %s %s\n",
		ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
		ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
		ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
	printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
	printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
	printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
		printk(KERN_ERR PFX "frame_len = %d\n",
			le32_to_cpu(ob_mac_tso_iocb->frame_len));
		printk(KERN_ERR PFX "mss = %d\n",
			le16_to_cpu(ob_mac_tso_iocb->mss));
		printk(KERN_ERR PFX "prot_hdr_len = %d\n",
			le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
		printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
			le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
	} else {
		printk(KERN_ERR PFX "frame_len = %d\n",
			le16_to_cpu(ob_mac_iocb->frame_len));
		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
	}
	tbd = &ob_mac_iocb->tbd[0];
	ql_dump_tx_desc(tbd);
}
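
/* Dump an outbound MAC IOCB completion. */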
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n",
		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
		ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
}
#endif

#ifdef QL_IB_DUMP
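/*
 * Dump an inbound MAC IOCB response, decoding its flag bits, error and
 * RSS type fields, and the data/header buffer addresses.
 */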
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
				IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
				IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
				IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
				IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
				IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
				IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
				IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
				IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
	printk(KERN_ERR PFX "flags3 = %s%s.\n",
		ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
		ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
				IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
				IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
				IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
				IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
	printk(KERN_ERR PFX "data_len = %d\n",
		le32_to_cpu(ib_mac_rsp->data_len));
	printk(KERN_ERR PFX "data_addr = 0x%llx\n",
		(unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "rss = %x\n",
			le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		printk(KERN_ERR PFX "vlan_id = %x\n",
			le16_to_cpu(ib_mac_rsp->vlan_id));
	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		printk(KERN_ERR PFX "hdr length = %d.\n",
			le32_to_cpu(ib_mac_rsp->hdr_len));
		printk(KERN_ERR PFX "hdr addr = 0x%llx.\n",
			(unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
#endif

#ifdef QL_ALL_DUMP
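/*
 * Dump registers, adapter state and every TX/RX ring along with its
 * hardware control block.
 */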
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;

	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
#endif