qlge.h
  1. /*
  2. * QLogic QLA41xx NIC HBA Driver
  3. * Copyright (c) 2003-2006 QLogic Corporation
  4. *
  5. * See LICENSE.qlge for copyright and licensing details.
  6. */
  7. #ifndef _QLGE_H_
  8. #define _QLGE_H_
  9. #include <linux/pci.h>
  10. #include <linux/netdevice.h>
  11. #include <linux/rtnetlink.h>
  12. /*
  13. * General definitions...
  14. */
  15. #define DRV_NAME "qlge"
  16. #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
  17. #define DRV_VERSION "v1.00.00.23.00.00-01"
  18. #define PFX "qlge: "
  19. #define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
  20. do { \
  21. if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
  22. ; \
  23. else \
  24. dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
  25. "%s: " fmt, __func__, ##args); \
  26. } while (0)
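/* Illustrative usage (editor's sketch, not from the original header): given a
 * struct ql_adapter pointer named qdev, a call such as
 *	QPRINTK(qdev, IFUP, ERR, "bringing up interface %d\n", i);
 * expands the level pair into NETIF_MSG_IFUP (tested against qdev->msg_enable)
 * and KERN_ERR (passed to dev_printk), so the message is emitted only when the
 * IFUP message class is enabled for this adapter.
 */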
  27. #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
  28. #define QLGE_VENDOR_ID 0x1077
  29. #define QLGE_DEVICE_ID_8012 0x8012
  30. #define QLGE_DEVICE_ID_8000 0x8000
  31. #define MAX_CPUS 8
  32. #define MAX_TX_RINGS MAX_CPUS
  33. #define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
  34. #define NUM_TX_RING_ENTRIES 256
  35. #define NUM_RX_RING_ENTRIES 256
  36. #define NUM_SMALL_BUFFERS 512
  37. #define NUM_LARGE_BUFFERS 512
  38. #define DB_PAGE_SIZE 4096
  39. /* Calculate the number of (4k) pages required to
  40. * contain a buffer queue of the given length.
  41. */
  42. #define MAX_DB_PAGES_PER_BQ(x) \
  43. (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
  44. (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
  45. #define RX_RING_SHADOW_SPACE (sizeof(u64) + \
  46. MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
  47. MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
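/* Worked example (illustrative): with NUM_SMALL_BUFFERS = NUM_LARGE_BUFFERS =
 * 512, each buffer queue needs 512 * sizeof(u64) = 4096 bytes of queue
 * entries, which is exactly one DB_PAGE, so MAX_DB_PAGES_PER_BQ(512) == 1.
 * The shadow area is then sizeof(u64) + 1 * sizeof(u64) + 1 * sizeof(u64)
 * = 24 bytes per rx ring.
 */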
  48. #define LARGE_BUFFER_MAX_SIZE 8192
  49. #define LARGE_BUFFER_MIN_SIZE 2048
  50. #define MAX_CQ 128
  51. #define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
  52. #define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
  53. #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
  54. #define UDELAY_COUNT 3
  55. #define UDELAY_DELAY 100
  56. #define TX_DESC_PER_IOCB 8
  57. /* The maximum number of frags we handle is based
  58. * on PAGE_SIZE...
  59. */
  60. #if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
  61. #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
  62. #else /* all other page sizes */
  63. #define TX_DESC_PER_OAL 0
  64. #endif
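/* Worked example (illustrative, assuming MAX_SKB_FRAGS is 18 on 4 KB pages,
 * i.e. 65536/PAGE_SIZE + 2): the first TX_DESC_PER_IOCB (8) descriptors live
 * in the IOCB itself, so the outbound address list must cover the rest,
 * giving TX_DESC_PER_OAL = (18 - 8) + 2 = 12 entries in struct oal below.
 */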
  65. /* Word shifting for converting 64-bit
  66. * address to a series of 16-bit words.
  67. * This is used for some MPI firmware
  68. * mailbox commands.
  69. */
  70. #define LSW(x) ((u16)(x))
  71. #define MSW(x) ((u16)((u32)(x) >> 16))
  72. #define LSD(x) ((u32)((u64)(x)))
  73. #define MSD(x) ((u32)((((u64)(x)) >> 32)))
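/* Worked example (illustrative): for a 64-bit DMA address
 * x = 0x1122334455667788,
 *	MSD(x) = 0x11223344, LSD(x) = 0x55667788,
 *	MSW(x) = 0x5566,     LSW(x) = 0x7788,
 * and the upper words follow as MSW(MSD(x)) = 0x1122, LSW(MSD(x)) = 0x3344,
 * which yields the four 16-bit words some MPI mailbox commands expect.
 */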
  74. /* MPI test register definitions. This register
  75. * is used for determining alternate NIC function's
  76. * PCI->func number.
  77. */
  78. enum {
  79. MPI_TEST_FUNC_PORT_CFG = 0x1002,
  80. MPI_TEST_FUNC_PRB_CTL = 0x100e,
  81. MPI_TEST_FUNC_PRB_EN = 0x18a20000,
  82. MPI_TEST_FUNC_RST_STS = 0x100a,
  83. MPI_TEST_FUNC_RST_FRC = 0x00000003,
  84. MPI_TEST_NIC_FUNC_MASK = 0x00000007,
  85. MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
  86. MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
  87. MPI_TEST_NIC1_FUNC_SHIFT = 1,
  88. MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
  89. MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
  90. MPI_TEST_NIC2_FUNC_SHIFT = 5,
  91. MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
  92. MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
  93. MPI_TEST_FC1_FUNCTION_SHIFT = 9,
  94. MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
  95. MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
  96. MPI_TEST_FC2_FUNCTION_SHIFT = 13,
  97. MPI_NIC_READ = 0x00000000,
  98. MPI_NIC_REG_BLOCK = 0x00020000,
  99. MPI_NIC_FUNCTION_SHIFT = 6,
  100. };
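/* Extraction sketch (illustrative, assuming a port-config word has already
 * been read from the MPI test block at MPI_TEST_FUNC_PORT_CFG): the alternate
 * NIC function number can be pulled out with the mask/shift pairs above, e.g.
 *	nic2_func = (port_cfg & MPI_TEST_NIC2_FUNCTION_MASK) >>
 *		    MPI_TEST_NIC2_FUNC_SHIFT;
 * after first checking that MPI_TEST_NIC2_FUNCTION_ENABLE is set.
 */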
  101. /*
  102. * Processor Address Register (PROC_ADDR) bit definitions.
  103. */
  104. enum {
  105. /* Misc. stuff */
  106. MAILBOX_COUNT = 16,
  107. MAILBOX_TIMEOUT = 5,
  108. PROC_ADDR_RDY = (1 << 31),
  109. PROC_ADDR_R = (1 << 30),
  110. PROC_ADDR_ERR = (1 << 29),
  111. PROC_ADDR_DA = (1 << 28),
  112. PROC_ADDR_FUNC0_MBI = 0x00001180,
  113. PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
  114. PROC_ADDR_FUNC0_CTL = 0x000011a1,
  115. PROC_ADDR_FUNC2_MBI = 0x00001280,
  116. PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
  117. PROC_ADDR_FUNC2_CTL = 0x000012a1,
  118. PROC_ADDR_MPI_RISC = 0x00000000,
  119. PROC_ADDR_MDE = 0x00010000,
  120. PROC_ADDR_REGBLOCK = 0x00020000,
  121. PROC_ADDR_RISC_REG = 0x00030000,
  122. };
  123. /*
  124. * System Register (SYS) bit definitions.
  125. */
  126. enum {
  127. SYS_EFE = (1 << 0),
  128. SYS_FAE = (1 << 1),
  129. SYS_MDC = (1 << 2),
  130. SYS_DST = (1 << 3),
  131. SYS_DWC = (1 << 4),
  132. SYS_EVW = (1 << 5),
  133. SYS_OMP_DLY_MASK = 0x3f000000,
  134. /*
  135. * There are no values defined as of edit #15.
  136. */
  137. SYS_ODI = (1 << 14),
  138. };
  139. /*
  140. * Reset/Failover Register (RST_FO) bit definitions.
  141. */
  142. enum {
  143. RST_FO_TFO = (1 << 0),
  144. RST_FO_RR_MASK = 0x00060000,
  145. RST_FO_RR_CQ_CAM = 0x00000000,
  146. RST_FO_RR_DROP = 0x00000002,
  147. RST_FO_RR_DQ = 0x00000004,
  148. RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
  149. RST_FO_FRB = (1 << 12),
  150. RST_FO_MOP = (1 << 13),
  151. RST_FO_REG = (1 << 14),
  152. RST_FO_FR = (1 << 15),
  153. };
  154. /*
  155. * Function Specific Control Register (FSC) bit definitions.
  156. */
  157. enum {
  158. FSC_DBRST_MASK = 0x00070000,
  159. FSC_DBRST_256 = 0x00000000,
  160. FSC_DBRST_512 = 0x00000001,
  161. FSC_DBRST_768 = 0x00000002,
  162. FSC_DBRST_1024 = 0x00000003,
  163. FSC_DBL_MASK = 0x00180000,
  164. FSC_DBL_DBRST = 0x00000000,
  165. FSC_DBL_MAX_PLD = 0x00000008,
  166. FSC_DBL_MAX_BRST = 0x00000010,
  167. FSC_DBL_128_BYTES = 0x00000018,
  168. FSC_EC = (1 << 5),
  169. FSC_EPC_MASK = 0x00c00000,
  170. FSC_EPC_INBOUND = (1 << 6),
  171. FSC_EPC_OUTBOUND = (1 << 7),
  172. FSC_VM_PAGESIZE_MASK = 0x07000000,
  173. FSC_VM_PAGE_2K = 0x00000100,
  174. FSC_VM_PAGE_4K = 0x00000200,
  175. FSC_VM_PAGE_8K = 0x00000300,
  176. FSC_VM_PAGE_64K = 0x00000600,
  177. FSC_SH = (1 << 11),
  178. FSC_DSB = (1 << 12),
  179. FSC_STE = (1 << 13),
  180. FSC_FE = (1 << 15),
  181. };
  182. /*
  183. * Host Command Status Register (CSR) bit definitions.
  184. */
  185. enum {
  186. CSR_ERR_STS_MASK = 0x0000003f,
  187. /*
  188. * There are no values defined as of edit #15.
  189. */
  190. CSR_RR = (1 << 8),
  191. CSR_HRI = (1 << 9),
  192. CSR_RP = (1 << 10),
  193. CSR_CMD_PARM_SHIFT = 22,
  194. CSR_CMD_NOP = 0x00000000,
  195. CSR_CMD_SET_RST = 0x10000000,
  196. CSR_CMD_CLR_RST = 0x20000000,
  197. CSR_CMD_SET_PAUSE = 0x30000000,
  198. CSR_CMD_CLR_PAUSE = 0x40000000,
  199. CSR_CMD_SET_H2R_INT = 0x50000000,
  200. CSR_CMD_CLR_H2R_INT = 0x60000000,
  201. CSR_CMD_PAR_EN = 0x70000000,
  202. CSR_CMD_SET_BAD_PAR = 0x80000000,
  203. CSR_CMD_CLR_BAD_PAR = 0x90000000,
  204. CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
  205. };
  206. /*
  207. * Configuration Register (CFG) bit definitions.
  208. */
  209. enum {
  210. CFG_LRQ = (1 << 0),
  211. CFG_DRQ = (1 << 1),
  212. CFG_LR = (1 << 2),
  213. CFG_DR = (1 << 3),
  214. CFG_LE = (1 << 5),
  215. CFG_LCQ = (1 << 6),
  216. CFG_DCQ = (1 << 7),
  217. CFG_Q_SHIFT = 8,
  218. CFG_Q_MASK = 0x7f000000,
  219. };
  220. /*
  221. * Status Register (STS) bit definitions.
  222. */
  223. enum {
  224. STS_FE = (1 << 0),
  225. STS_PI = (1 << 1),
  226. STS_PL0 = (1 << 2),
  227. STS_PL1 = (1 << 3),
  228. STS_PI0 = (1 << 4),
  229. STS_PI1 = (1 << 5),
  230. STS_FUNC_ID_MASK = 0x000000c0,
  231. STS_FUNC_ID_SHIFT = 6,
  232. STS_F0E = (1 << 8),
  233. STS_F1E = (1 << 9),
  234. STS_F2E = (1 << 10),
  235. STS_F3E = (1 << 11),
  236. STS_NFE = (1 << 12),
  237. };
  238. /*
  239. * Interrupt Enable Register (INTR_EN) bit definitions.
  240. */
  241. enum {
  242. INTR_EN_INTR_MASK = 0x007f0000,
  243. INTR_EN_TYPE_MASK = 0x03000000,
  244. INTR_EN_TYPE_ENABLE = 0x00000100,
  245. INTR_EN_TYPE_DISABLE = 0x00000200,
  246. INTR_EN_TYPE_READ = 0x00000300,
  247. INTR_EN_IHD = (1 << 13),
  248. INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
  249. INTR_EN_EI = (1 << 14),
  250. INTR_EN_EN = (1 << 15),
  251. };
  252. /*
  253. * Interrupt Mask Register (INTR_MASK) bit definitions.
  254. */
  255. enum {
  256. INTR_MASK_PI = (1 << 0),
  257. INTR_MASK_HL0 = (1 << 1),
  258. INTR_MASK_LH0 = (1 << 2),
  259. INTR_MASK_HL1 = (1 << 3),
  260. INTR_MASK_LH1 = (1 << 4),
  261. INTR_MASK_SE = (1 << 5),
  262. INTR_MASK_LSC = (1 << 6),
  263. INTR_MASK_MC = (1 << 7),
  264. INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
  265. };
  266. /*
  267. * Register (REV_ID) bit definitions.
  268. */
  269. enum {
  270. REV_ID_MASK = 0x0000000f,
  271. REV_ID_NICROLL_SHIFT = 0,
  272. REV_ID_NICREV_SHIFT = 4,
  273. REV_ID_XGROLL_SHIFT = 8,
  274. REV_ID_XGREV_SHIFT = 12,
  275. REV_ID_CHIPREV_SHIFT = 28,
  276. };
  277. /*
  278. * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
  279. */
  280. enum {
  281. FRC_ECC_ERR_VW = (1 << 12),
  282. FRC_ECC_ERR_VB = (1 << 13),
  283. FRC_ECC_ERR_NI = (1 << 14),
  284. FRC_ECC_ERR_NO = (1 << 15),
  285. FRC_ECC_PFE_SHIFT = 16,
  286. FRC_ECC_ERR_DO = (1 << 18),
  287. FRC_ECC_P14 = (1 << 19),
  288. };
  289. /*
  290. * Error Status Register (ERR_STS) bit definitions.
  291. */
  292. enum {
  293. ERR_STS_NOF = (1 << 0),
  294. ERR_STS_NIF = (1 << 1),
  295. ERR_STS_DRP = (1 << 2),
  296. ERR_STS_XGP = (1 << 3),
  297. ERR_STS_FOU = (1 << 4),
  298. ERR_STS_FOC = (1 << 5),
  299. ERR_STS_FOF = (1 << 6),
  300. ERR_STS_FIU = (1 << 7),
  301. ERR_STS_FIC = (1 << 8),
  302. ERR_STS_FIF = (1 << 9),
  303. ERR_STS_MOF = (1 << 10),
  304. ERR_STS_TA = (1 << 11),
  305. ERR_STS_MA = (1 << 12),
  306. ERR_STS_MPE = (1 << 13),
  307. ERR_STS_SCE = (1 << 14),
  308. ERR_STS_STE = (1 << 15),
  309. ERR_STS_FOW = (1 << 16),
  310. ERR_STS_UE = (1 << 17),
  311. ERR_STS_MCH = (1 << 26),
  312. ERR_STS_LOC_SHIFT = 27,
  313. };
  314. /*
  315. * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
  316. */
  317. enum {
  318. RAM_DBG_ADDR_FW = (1 << 30),
  319. RAM_DBG_ADDR_FR = (1 << 31),
  320. };
  321. /*
  322. * Semaphore Register (SEM) bit definitions.
  323. */
  324. enum {
  325. /*
  326. * Example:
  327. * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
  328. */
  329. SEM_CLEAR = 0,
  330. SEM_SET = 1,
  331. SEM_FORCE = 3,
  332. SEM_XGMAC0_SHIFT = 0,
  333. SEM_XGMAC1_SHIFT = 2,
  334. SEM_ICB_SHIFT = 4,
  335. SEM_MAC_ADDR_SHIFT = 6,
  336. SEM_FLASH_SHIFT = 8,
  337. SEM_PROBE_SHIFT = 10,
  338. SEM_RT_IDX_SHIFT = 12,
  339. SEM_PROC_REG_SHIFT = 14,
  340. SEM_XGMAC0_MASK = 0x00030000,
  341. SEM_XGMAC1_MASK = 0x000c0000,
  342. SEM_ICB_MASK = 0x00300000,
  343. SEM_MAC_ADDR_MASK = 0x00c00000,
  344. SEM_FLASH_MASK = 0x03000000,
  345. SEM_PROBE_MASK = 0x0c000000,
  346. SEM_RT_IDX_MASK = 0x30000000,
  347. SEM_PROC_REG_MASK = 0xc0000000,
  348. };
  349. /*
  350. * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
  351. */
  352. enum {
  353. XGMAC_ADDR_RDY = (1 << 31),
  354. XGMAC_ADDR_R = (1 << 30),
  355. XGMAC_ADDR_XME = (1 << 29),
  356. /* XGMAC control registers */
  357. PAUSE_SRC_LO = 0x00000100,
  358. PAUSE_SRC_HI = 0x00000104,
  359. GLOBAL_CFG = 0x00000108,
  360. GLOBAL_CFG_RESET = (1 << 0),
  361. GLOBAL_CFG_JUMBO = (1 << 6),
  362. GLOBAL_CFG_TX_STAT_EN = (1 << 10),
  363. GLOBAL_CFG_RX_STAT_EN = (1 << 11),
  364. TX_CFG = 0x0000010c,
  365. TX_CFG_RESET = (1 << 0),
  366. TX_CFG_EN = (1 << 1),
  367. TX_CFG_PREAM = (1 << 2),
  368. RX_CFG = 0x00000110,
  369. RX_CFG_RESET = (1 << 0),
  370. RX_CFG_EN = (1 << 1),
  371. RX_CFG_PREAM = (1 << 2),
  372. FLOW_CTL = 0x0000011c,
  373. PAUSE_OPCODE = 0x00000120,
  374. PAUSE_TIMER = 0x00000124,
  375. PAUSE_FRM_DEST_LO = 0x00000128,
  376. PAUSE_FRM_DEST_HI = 0x0000012c,
  377. MAC_TX_PARAMS = 0x00000134,
  378. MAC_TX_PARAMS_JUMBO = (1 << 31),
  379. MAC_TX_PARAMS_SIZE_SHIFT = 16,
  380. MAC_RX_PARAMS = 0x00000138,
  381. MAC_SYS_INT = 0x00000144,
  382. MAC_SYS_INT_MASK = 0x00000148,
  383. MAC_MGMT_INT = 0x0000014c,
  384. MAC_MGMT_IN_MASK = 0x00000150,
  385. EXT_ARB_MODE = 0x000001fc,
  386. /* XGMAC TX statistics registers */
  387. TX_PKTS = 0x00000200,
  388. TX_BYTES = 0x00000208,
  389. TX_MCAST_PKTS = 0x00000210,
  390. TX_BCAST_PKTS = 0x00000218,
  391. TX_UCAST_PKTS = 0x00000220,
  392. TX_CTL_PKTS = 0x00000228,
  393. TX_PAUSE_PKTS = 0x00000230,
  394. TX_64_PKT = 0x00000238,
  395. TX_65_TO_127_PKT = 0x00000240,
  396. TX_128_TO_255_PKT = 0x00000248,
  397. TX_256_511_PKT = 0x00000250,
  398. TX_512_TO_1023_PKT = 0x00000258,
  399. TX_1024_TO_1518_PKT = 0x00000260,
  400. TX_1519_TO_MAX_PKT = 0x00000268,
  401. TX_UNDERSIZE_PKT = 0x00000270,
  402. TX_OVERSIZE_PKT = 0x00000278,
  403. /* XGMAC statistics control registers */
  404. RX_HALF_FULL_DET = 0x000002a0,
  405. TX_HALF_FULL_DET = 0x000002a4,
  406. RX_OVERFLOW_DET = 0x000002a8,
  407. TX_OVERFLOW_DET = 0x000002ac,
  408. RX_HALF_FULL_MASK = 0x000002b0,
  409. TX_HALF_FULL_MASK = 0x000002b4,
  410. RX_OVERFLOW_MASK = 0x000002b8,
  411. TX_OVERFLOW_MASK = 0x000002bc,
  412. STAT_CNT_CTL = 0x000002c0,
  413. STAT_CNT_CTL_CLEAR_TX = (1 << 0),
  414. STAT_CNT_CTL_CLEAR_RX = (1 << 1),
  415. AUX_RX_HALF_FULL_DET = 0x000002d0,
  416. AUX_TX_HALF_FULL_DET = 0x000002d4,
  417. AUX_RX_OVERFLOW_DET = 0x000002d8,
  418. AUX_TX_OVERFLOW_DET = 0x000002dc,
  419. AUX_RX_HALF_FULL_MASK = 0x000002f0,
  420. AUX_TX_HALF_FULL_MASK = 0x000002f4,
  421. AUX_RX_OVERFLOW_MASK = 0x000002f8,
  422. AUX_TX_OVERFLOW_MASK = 0x000002fc,
  423. /* XGMAC RX statistics registers */
  424. RX_BYTES = 0x00000300,
  425. RX_BYTES_OK = 0x00000308,
  426. RX_PKTS = 0x00000310,
  427. RX_PKTS_OK = 0x00000318,
  428. RX_BCAST_PKTS = 0x00000320,
  429. RX_MCAST_PKTS = 0x00000328,
  430. RX_UCAST_PKTS = 0x00000330,
  431. RX_UNDERSIZE_PKTS = 0x00000338,
  432. RX_OVERSIZE_PKTS = 0x00000340,
  433. RX_JABBER_PKTS = 0x00000348,
  434. RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
  435. RX_DROP_EVENTS = 0x00000358,
  436. RX_FCERR_PKTS = 0x00000360,
  437. RX_ALIGN_ERR = 0x00000368,
  438. RX_SYMBOL_ERR = 0x00000370,
  439. RX_MAC_ERR = 0x00000378,
  440. RX_CTL_PKTS = 0x00000380,
  441. RX_PAUSE_PKTS = 0x00000388,
  442. RX_64_PKTS = 0x00000390,
  443. RX_65_TO_127_PKTS = 0x00000398,
  444. RX_128_255_PKTS = 0x000003a0,
  445. RX_256_511_PKTS = 0x000003a8,
  446. RX_512_TO_1023_PKTS = 0x000003b0,
  447. RX_1024_TO_1518_PKTS = 0x000003b8,
  448. RX_1519_TO_MAX_PKTS = 0x000003c0,
  449. RX_LEN_ERR_PKTS = 0x000003c8,
  450. /* XGMAC MDIO control registers */
  451. MDIO_TX_DATA = 0x00000400,
  452. MDIO_RX_DATA = 0x00000410,
  453. MDIO_CMD = 0x00000420,
  454. MDIO_PHY_ADDR = 0x00000430,
  455. MDIO_PORT = 0x00000440,
  456. MDIO_STATUS = 0x00000450,
  457. XGMAC_REGISTER_END = 0x00000740,
  458. };
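/* Access-sequence sketch (editor's assumption based on the RDY/R bits above,
 * not a statement of the driver's exact code): to read an XGMAC statistic
 * such as TX_PKTS, software would typically wait for XGMAC_ADDR_RDY, write
 * (TX_PKTS | XGMAC_ADDR_R) to the XGMAC_ADDR control register, wait for
 * XGMAC_ADDR_RDY again, and then read the value from XGMAC_DATA, holding the
 * appropriate XGMAC semaphore (SEM_XGMAC0/1) around the access.
 */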
  459. /*
  460. * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
  461. */
  462. enum {
  463. ETS_QUEUE_SHIFT = 29,
  464. ETS_REF = (1 << 26),
  465. ETS_RS = (1 << 27),
  466. ETS_P = (1 << 28),
  467. ETS_FC_COS_SHIFT = 23,
  468. };
  469. /*
  470. * Flash Address Register (FLASH_ADDR) bit definitions.
  471. */
  472. enum {
  473. FLASH_ADDR_RDY = (1 << 31),
  474. FLASH_ADDR_R = (1 << 30),
  475. FLASH_ADDR_ERR = (1 << 29),
  476. };
  477. /*
  478. * Stop CQ Processing Register (CQ_STOP) bit definitions.
  479. */
  480. enum {
  481. CQ_STOP_QUEUE_MASK = (0x007f0000),
  482. CQ_STOP_TYPE_MASK = (0x03000000),
  483. CQ_STOP_TYPE_START = 0x00000100,
  484. CQ_STOP_TYPE_STOP = 0x00000200,
  485. CQ_STOP_TYPE_READ = 0x00000300,
  486. CQ_STOP_EN = (1 << 15),
  487. };
  488. /*
  489. * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
  490. */
  491. enum {
  492. MAC_ADDR_IDX_SHIFT = 4,
  493. MAC_ADDR_TYPE_SHIFT = 16,
  494. MAC_ADDR_TYPE_COUNT = 10,
  495. MAC_ADDR_TYPE_MASK = 0x000f0000,
  496. MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
  497. MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
  498. MAC_ADDR_TYPE_VLAN = 0x00020000,
  499. MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
  500. MAC_ADDR_TYPE_FC_MAC = 0x00040000,
  501. MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
  502. MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
  503. MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
  504. MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
  505. MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
  506. MAC_ADDR_ADR = (1 << 25),
  507. MAC_ADDR_RS = (1 << 26),
  508. MAC_ADDR_E = (1 << 27),
  509. MAC_ADDR_MR = (1 << 30),
  510. MAC_ADDR_MW = (1 << 31),
  511. MAX_MULTICAST_ENTRIES = 32,
  512. /* Entry count and words per entry
  513. * for each address type in the filter.
  514. */
  515. MAC_ADDR_MAX_CAM_ENTRIES = 512,
  516. MAC_ADDR_MAX_CAM_WCOUNT = 3,
  517. MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
  518. MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
  519. MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
  520. MAC_ADDR_MAX_VLAN_WCOUNT = 1,
  521. MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
  522. MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
  523. MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
  524. MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
  525. MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
  526. MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
  527. MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
  528. MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
  529. MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
  530. MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
  531. MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
  532. MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
  533. MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
  534. MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
  535. };
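/* Composition sketch (illustrative, not verbatim driver code): to program CAM
 * entry n with a unicast MAC address, the index word might be built as
 *	idx = (n << MAC_ADDR_IDX_SHIFT) | MAC_ADDR_TYPE_CAM_MAC;
 * (optionally OR-ing MAC_ADDR_E to enable the entry; that bit usage is an
 * assumption here), written to MAC_ADDR_IDX, with the address words following
 * through MAC_ADDR_DATA (MAC_ADDR_MAX_CAM_WCOUNT = 3 words per CAM entry,
 * the last being the CAM output word defined further below).
 */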
  536. /*
  537. * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
  538. */
  539. enum {
  540. SPLT_HDR_EP = (1 << 31),
  541. };
  542. /*
  543. * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
  544. */
  545. enum {
  546. FC_RCV_CFG_ECT = (1 << 15),
  547. FC_RCV_CFG_DFH = (1 << 20),
  548. FC_RCV_CFG_DVF = (1 << 21),
  549. FC_RCV_CFG_RCE = (1 << 27),
  550. FC_RCV_CFG_RFE = (1 << 28),
  551. FC_RCV_CFG_TEE = (1 << 29),
  552. FC_RCV_CFG_TCE = (1 << 30),
  553. FC_RCV_CFG_TFE = (1 << 31),
  554. };
  555. /*
  556. * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
  557. */
  558. enum {
  559. NIC_RCV_CFG_PPE = (1 << 0),
  560. NIC_RCV_CFG_VLAN_MASK = 0x00060000,
  561. NIC_RCV_CFG_VLAN_ALL = 0x00000000,
  562. NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
  563. NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
  564. NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
  565. NIC_RCV_CFG_RV = (1 << 3),
  566. NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
  567. NIC_RCV_CFG_DFQ_SHIFT = 8,
  568. NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
  569. };
  570. /*
  571. * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
  572. */
  573. enum {
  574. MGMT_RCV_CFG_ARP = (1 << 0),
  575. MGMT_RCV_CFG_DHC = (1 << 1),
  576. MGMT_RCV_CFG_DHS = (1 << 2),
  577. MGMT_RCV_CFG_NP = (1 << 3),
  578. MGMT_RCV_CFG_I6N = (1 << 4),
  579. MGMT_RCV_CFG_I6R = (1 << 5),
  580. MGMT_RCV_CFG_DH6 = (1 << 6),
  581. MGMT_RCV_CFG_UD1 = (1 << 7),
  582. MGMT_RCV_CFG_UD0 = (1 << 8),
  583. MGMT_RCV_CFG_BCT = (1 << 9),
  584. MGMT_RCV_CFG_MCT = (1 << 10),
  585. MGMT_RCV_CFG_DM = (1 << 11),
  586. MGMT_RCV_CFG_RM = (1 << 12),
  587. MGMT_RCV_CFG_STL = (1 << 13),
  588. MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
  589. MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
  590. MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
  591. MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
  592. MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
  593. };
  594. /*
  595. * Routing Index Register (RT_IDX) bit definitions.
  596. */
  597. enum {
  598. RT_IDX_IDX_SHIFT = 8,
  599. RT_IDX_TYPE_MASK = 0x000f0000,
  600. RT_IDX_TYPE_SHIFT = 16,
  601. RT_IDX_TYPE_RT = 0x00000000,
  602. RT_IDX_TYPE_RT_INV = 0x00010000,
  603. RT_IDX_TYPE_NICQ = 0x00020000,
  604. RT_IDX_TYPE_NICQ_INV = 0x00030000,
  605. RT_IDX_DST_MASK = 0x00700000,
  606. RT_IDX_DST_RSS = 0x00000000,
  607. RT_IDX_DST_CAM_Q = 0x00100000,
  608. RT_IDX_DST_COS_Q = 0x00200000,
  609. RT_IDX_DST_DFLT_Q = 0x00300000,
  610. RT_IDX_DST_DEST_Q = 0x00400000,
  611. RT_IDX_RS = (1 << 26),
  612. RT_IDX_E = (1 << 27),
  613. RT_IDX_MR = (1 << 30),
  614. RT_IDX_MW = (1 << 31),
  615. /* Nic Queue format - type 2 bits */
  616. RT_IDX_BCAST = (1 << 0),
  617. RT_IDX_MCAST = (1 << 1),
  618. RT_IDX_MCAST_MATCH = (1 << 2),
  619. RT_IDX_MCAST_REG_MATCH = (1 << 3),
  620. RT_IDX_MCAST_HASH_MATCH = (1 << 4),
  621. RT_IDX_FC_MACH = (1 << 5),
  622. RT_IDX_ETH_FCOE = (1 << 6),
  623. RT_IDX_CAM_HIT = (1 << 7),
  624. RT_IDX_CAM_BIT0 = (1 << 8),
  625. RT_IDX_CAM_BIT1 = (1 << 9),
  626. RT_IDX_VLAN_TAG = (1 << 10),
  627. RT_IDX_VLAN_MATCH = (1 << 11),
  628. RT_IDX_VLAN_FILTER = (1 << 12),
  629. RT_IDX_ETH_SKIP1 = (1 << 13),
  630. RT_IDX_ETH_SKIP2 = (1 << 14),
  631. RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
  632. RT_IDX_802_3 = (1 << 16),
  633. RT_IDX_LLDP = (1 << 17),
  634. RT_IDX_UNUSED018 = (1 << 18),
  635. RT_IDX_UNUSED019 = (1 << 19),
  636. RT_IDX_UNUSED20 = (1 << 20),
  637. RT_IDX_UNUSED21 = (1 << 21),
  638. RT_IDX_ERR = (1 << 22),
  639. RT_IDX_VALID = (1 << 23),
  640. RT_IDX_TU_CSUM_ERR = (1 << 24),
  641. RT_IDX_IP_CSUM_ERR = (1 << 25),
  642. RT_IDX_MAC_ERR = (1 << 26),
  643. RT_IDX_RSS_TCP6 = (1 << 27),
  644. RT_IDX_RSS_TCP4 = (1 << 28),
  645. RT_IDX_RSS_IPV6 = (1 << 29),
  646. RT_IDX_RSS_IPV4 = (1 << 30),
  647. RT_IDX_RSS_MATCH = (1 << 31),
  648. /* Hierarchy for the NIC Queue Mask */
  649. RT_IDX_ALL_ERR_SLOT = 0,
  650. RT_IDX_MAC_ERR_SLOT = 0,
  651. RT_IDX_IP_CSUM_ERR_SLOT = 1,
  652. RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
  653. RT_IDX_BCAST_SLOT = 3,
  654. RT_IDX_MCAST_MATCH_SLOT = 4,
  655. RT_IDX_ALLMULTI_SLOT = 5,
  656. RT_IDX_UNUSED6_SLOT = 6,
  657. RT_IDX_UNUSED7_SLOT = 7,
  658. RT_IDX_RSS_MATCH_SLOT = 8,
  659. RT_IDX_RSS_IPV4_SLOT = 8,
  660. RT_IDX_RSS_IPV6_SLOT = 9,
  661. RT_IDX_RSS_TCP4_SLOT = 10,
  662. RT_IDX_RSS_TCP6_SLOT = 11,
  663. RT_IDX_CAM_HIT_SLOT = 12,
  664. RT_IDX_UNUSED013 = 13,
  665. RT_IDX_UNUSED014 = 14,
  666. RT_IDX_PROMISCUOUS_SLOT = 15,
  667. RT_IDX_MAX_RT_SLOTS = 8,
  668. RT_IDX_MAX_NIC_SLOTS = 16,
  669. };
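/* Composition sketch (illustrative, not verbatim driver code): a routing slot
 * that steers broadcast frames to the default queue might be programmed as
 *	value = (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT) |
 *		RT_IDX_TYPE_NICQ | RT_IDX_DST_DFLT_Q;
 * written to RT_IDX, with the frame-match mask (here RT_IDX_BCAST) written to
 * RT_DATA to enable the entry, or 0 to disable it.
 */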
  670. /*
  671. * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
  672. */
  673. enum {
  674. XG_SERDES_ADDR_RDY = (1 << 31),
  675. XG_SERDES_ADDR_R = (1 << 30),
  676. XG_SERDES_ADDR_STS = 0x00001E06,
  677. XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
  678. XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
  679. XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
  680. /* Serdes coredump definitions. */
  681. XG_SERDES_XAUI_AN_START = 0x00000000,
  682. XG_SERDES_XAUI_AN_END = 0x00000034,
  683. XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
  684. XG_SERDES_XAUI_HSS_PCS_END = 0x00000880,
  685. XG_SERDES_XFI_AN_START = 0x00001000,
  686. XG_SERDES_XFI_AN_END = 0x00001034,
  687. XG_SERDES_XFI_TRAIN_START = 0x10001050,
  688. XG_SERDES_XFI_TRAIN_END = 0x1000107C,
  689. XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
  690. XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
  691. XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
  692. XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
  693. XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
  694. XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
  695. XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
  696. XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
  697. };
  698. /*
  699. * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
  700. */
  701. enum {
  702. PRB_MX_ADDR_ARE = (1 << 16),
  703. PRB_MX_ADDR_UP = (1 << 15),
  704. PRB_MX_ADDR_SWP = (1 << 14),
  705. /* Module select values. */
  706. PRB_MX_ADDR_MAX_MODS = 21,
  707. PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
  708. PRB_MX_ADDR_MOD_SEL_TBD = 0,
  709. PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
  710. PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
  711. PRB_MX_ADDR_MOD_SEL_FRB = 3,
  712. PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
  713. PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
  714. PRB_MX_ADDR_MOD_SEL_DA1 = 6,
  715. PRB_MX_ADDR_MOD_SEL_DA2 = 7,
  716. PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
  717. PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
  718. PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
  719. PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
  720. PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
  721. PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
  722. PRB_MX_ADDR_MOD_SEL_REG = 14,
  723. PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
  724. PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
  725. PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
  726. PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
  727. PRB_MX_ADDR_MOD_SEL_MOP = 20,
  728. /* Bit fields indicating which modules
  729. * are valid for each clock domain.
  730. */
  731. PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
  732. PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
  733. PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
  734. PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
  735. PRB_MX_ADDR_VALID_TOTAL = 34,
  736. /* Clock domain values. */
  737. PRB_MX_ADDR_CLOCK_SHIFT = 6,
  738. PRB_MX_ADDR_SYS_CLOCK = 0,
  739. PRB_MX_ADDR_PCI_CLOCK = 2,
  740. PRB_MX_ADDR_FC_CLOCK = 5,
  741. PRB_MX_ADDR_XGM_CLOCK = 6,
  742. PRB_MX_ADDR_MAX_MUX = 64,
  743. };
  744. /*
  745. * Control Register Set Map
  746. */
  747. enum {
  748. PROC_ADDR = 0, /* Use semaphore */
  749. PROC_DATA = 0x04, /* Use semaphore */
  750. SYS = 0x08,
  751. RST_FO = 0x0c,
  752. FSC = 0x10,
  753. CSR = 0x14,
  754. LED = 0x18,
  755. ICB_RID = 0x1c, /* Use semaphore */
  756. ICB_L = 0x20, /* Use semaphore */
  757. ICB_H = 0x24, /* Use semaphore */
  758. CFG = 0x28,
  759. BIOS_ADDR = 0x2c,
  760. STS = 0x30,
  761. INTR_EN = 0x34,
  762. INTR_MASK = 0x38,
  763. ISR1 = 0x3c,
  764. ISR2 = 0x40,
  765. ISR3 = 0x44,
  766. ISR4 = 0x48,
  767. REV_ID = 0x4c,
  768. FRC_ECC_ERR = 0x50,
  769. ERR_STS = 0x54,
  770. RAM_DBG_ADDR = 0x58,
  771. RAM_DBG_DATA = 0x5c,
  772. ECC_ERR_CNT = 0x60,
  773. SEM = 0x64,
  774. GPIO_1 = 0x68, /* Use semaphore */
  775. GPIO_2 = 0x6c, /* Use semaphore */
  776. GPIO_3 = 0x70, /* Use semaphore */
  777. RSVD2 = 0x74,
  778. XGMAC_ADDR = 0x78, /* Use semaphore */
  779. XGMAC_DATA = 0x7c, /* Use semaphore */
  780. NIC_ETS = 0x80,
  781. CNA_ETS = 0x84,
  782. FLASH_ADDR = 0x88, /* Use semaphore */
  783. FLASH_DATA = 0x8c, /* Use semaphore */
  784. CQ_STOP = 0x90,
  785. PAGE_TBL_RID = 0x94,
  786. WQ_PAGE_TBL_LO = 0x98,
  787. WQ_PAGE_TBL_HI = 0x9c,
  788. CQ_PAGE_TBL_LO = 0xa0,
  789. CQ_PAGE_TBL_HI = 0xa4,
  790. MAC_ADDR_IDX = 0xa8, /* Use semaphore */
  791. MAC_ADDR_DATA = 0xac, /* Use semaphore */
  792. COS_DFLT_CQ1 = 0xb0,
  793. COS_DFLT_CQ2 = 0xb4,
  794. ETYPE_SKIP1 = 0xb8,
  795. ETYPE_SKIP2 = 0xbc,
  796. SPLT_HDR = 0xc0,
  797. FC_PAUSE_THRES = 0xc4,
  798. NIC_PAUSE_THRES = 0xc8,
  799. FC_ETHERTYPE = 0xcc,
  800. FC_RCV_CFG = 0xd0,
  801. NIC_RCV_CFG = 0xd4,
  802. FC_COS_TAGS = 0xd8,
  803. NIC_COS_TAGS = 0xdc,
  804. MGMT_RCV_CFG = 0xe0,
  805. RT_IDX = 0xe4,
  806. RT_DATA = 0xe8,
  807. RSVD7 = 0xec,
  808. XG_SERDES_ADDR = 0xf0,
  809. XG_SERDES_DATA = 0xf4,
  810. PRB_MX_ADDR = 0xf8, /* Use semaphore */
  811. PRB_MX_DATA = 0xfc, /* Use semaphore */
  812. };
  813. #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  814. #define SMALL_BUFFER_SIZE 256
  815. #define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
  816. #define SPLT_SETTING FSC_DBRST_1024
  817. #define SPLT_LEN 0
  818. #define QLGE_SB_PAD 0
  819. #else
  820. #define SMALL_BUFFER_SIZE 512
  821. #define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
  822. #define SPLT_SETTING FSC_SH
  823. #define SPLT_LEN (SPLT_HDR_EP | \
  824. min(SMALL_BUF_MAP_SIZE, 1023))
  825. #define QLGE_SB_PAD 32
  826. #endif
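/* Worked values (illustrative): with efficient unaligned access the small
 * buffer is 256 bytes, fully mapped, with splitting driven by FSC_DBRST_1024
 * and no split length or pad.  Without it, the small buffer is 512 bytes,
 * SMALL_BUF_MAP_SIZE = 512 / 2 = 256, header splitting is selected via
 * FSC_SH, and SPLT_LEN = SPLT_HDR_EP | min(256, 1023) = SPLT_HDR_EP | 256.
 */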
  827. /*
  828. * CAM output format.
  829. */
  830. enum {
  831. CAM_OUT_ROUTE_FC = 0,
  832. CAM_OUT_ROUTE_NIC = 1,
  833. CAM_OUT_FUNC_SHIFT = 2,
  834. CAM_OUT_RV = (1 << 4),
  835. CAM_OUT_SH = (1 << 15),
  836. CAM_OUT_CQ_ID_SHIFT = 5,
  837. };
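/* Composition sketch (illustrative): the CAM output word routes a matching
 * frame, e.g.
 *	out = CAM_OUT_ROUTE_NIC | (func << CAM_OUT_FUNC_SHIFT) |
 *	      (cq_id << CAM_OUT_CQ_ID_SHIFT);
 * with CAM_OUT_RV set when VLAN handling is wanted.  func and cq_id here are
 * placeholders for the PCI function and the destination completion queue.
 */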
  838. /*
  839. * Mailbox definitions
  840. */
  841. enum {
  842. /* Asynchronous Event Notifications */
  843. AEN_SYS_ERR = 0x00008002,
  844. AEN_LINK_UP = 0x00008011,
  845. AEN_LINK_DOWN = 0x00008012,
  846. AEN_IDC_CMPLT = 0x00008100,
  847. AEN_IDC_REQ = 0x00008101,
  848. AEN_IDC_EXT = 0x00008102,
  849. AEN_DCBX_CHG = 0x00008110,
  850. AEN_AEN_LOST = 0x00008120,
  851. AEN_AEN_SFP_IN = 0x00008130,
  852. AEN_AEN_SFP_OUT = 0x00008131,
  853. AEN_FW_INIT_DONE = 0x00008400,
  854. AEN_FW_INIT_FAIL = 0x00008401,
  855. /* Mailbox Command Opcodes. */
  856. MB_CMD_NOP = 0x00000000,
  857. MB_CMD_EX_FW = 0x00000002,
  858. MB_CMD_MB_TEST = 0x00000006,
  859. MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
  860. MB_CMD_ABOUT_FW = 0x00000008,
  861. MB_CMD_COPY_RISC_RAM = 0x0000000a,
  862. MB_CMD_LOAD_RISC_RAM = 0x0000000b,
  863. MB_CMD_DUMP_RISC_RAM = 0x0000000c,
  864. MB_CMD_WRITE_RAM = 0x0000000d,
  865. MB_CMD_INIT_RISC_RAM = 0x0000000e,
  866. MB_CMD_READ_RAM = 0x0000000f,
  867. MB_CMD_STOP_FW = 0x00000014,
  868. MB_CMD_MAKE_SYS_ERR = 0x0000002a,
  869. MB_CMD_WRITE_SFP = 0x00000030,
  870. MB_CMD_READ_SFP = 0x00000031,
  871. MB_CMD_INIT_FW = 0x00000060,
  872. MB_CMD_GET_IFCB = 0x00000061,
  873. MB_CMD_GET_FW_STATE = 0x00000069,
  874. MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
  875. MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
  876. MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On LAN */
  877. MB_WOL_DISABLE = 0,
  878. MB_WOL_MAGIC_PKT = (1 << 1),
  879. MB_WOL_FLTR = (1 << 2),
  880. MB_WOL_UCAST = (1 << 3),
  881. MB_WOL_MCAST = (1 << 4),
  882. MB_WOL_BCAST = (1 << 5),
  883. MB_WOL_LINK_UP = (1 << 6),
  884. MB_WOL_LINK_DOWN = (1 << 7),
  885. MB_WOL_MODE_ON = (1 << 16), /* Wake on LAN Mode on */
  886. MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On LAN Filter */
  887. MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On LAN Filter */
  888. MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On LAN Magic Packet */
  889. MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On LAN Magic Packet */
  890. MB_CMD_SET_WOL_IMMED = 0x00000115,
  891. MB_CMD_PORT_RESET = 0x00000120,
  892. MB_CMD_SET_PORT_CFG = 0x00000122,
  893. MB_CMD_GET_PORT_CFG = 0x00000123,
  894. MB_CMD_GET_LINK_STS = 0x00000124,
  895. MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
  896. QL_LED_BLINK = 0x03e803e8,
  897. MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
  898. MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
  899. MB_SET_MPI_TFK_STOP = (1 << 0),
  900. MB_SET_MPI_TFK_RESUME = (1 << 1),
  901. MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
  902. MB_GET_MPI_TFK_STOPPED = (1 << 0),
  903. MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
  904. /* Sub-commands for IDC request.
  905. * This describes the reason for the
  906. * IDC request.
  907. */
  908. MB_CMD_IOP_NONE = 0x0000,
  909. MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
  910. MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
  911. MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
  912. MB_CMD_IOP_DVR_START = 0x0100,
  913. MB_CMD_IOP_FLASH_ACC = 0x0101,
  914. MB_CMD_IOP_RESTART_MPI = 0x0102,
  915. MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
  916. /* Mailbox Command Status. */
  917. MB_CMD_STS_GOOD = 0x00004000, /* Success. */
  918. MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
  919. MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
  920. MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
  921. MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
  922. MB_CMD_STS_ERR = 0x00004005, /* System Error. */
  923. MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
  924. };
  925. struct mbox_params {
  926. u32 mbox_in[MAILBOX_COUNT];
  927. u32 mbox_out[MAILBOX_COUNT];
  928. int in_count;
  929. int out_count;
  930. };
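/* Usage sketch (illustrative; the mailbox helpers themselves live elsewhere
 * in the driver, not in this header): a caller fills mbox_in[] and the counts
 * before issuing the command, e.g. for MB_CMD_ABOUT_FW roughly
 *	mbcp->in_count   = 1;
 *	mbcp->out_count  = 3;
 *	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
 * and on success expects mbox_out[0] == MB_CMD_STS_GOOD, with the firmware
 * revision returned in the following out words.
 */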
  931. struct flash_params_8012 {
  932. u8 dev_id_str[4];
  933. __le16 size;
  934. __le16 csum;
  935. __le16 ver;
  936. __le16 sub_dev_id;
  937. u8 mac_addr[6];
  938. __le16 res;
  939. };
  940. /* 8000 device's flash is a different structure
  941. * at a different offset in flash.
  942. */
  943. #define FUNC0_FLASH_OFFSET 0x140200
  944. #define FUNC1_FLASH_OFFSET 0x140600
  945. /* Flash related data structures. */
  946. struct flash_params_8000 {
  947. u8 dev_id_str[4]; /* "8000" */
  948. __le16 ver;
  949. __le16 size;
  950. __le16 csum;
  951. __le16 reserved0;
  952. __le16 total_size;
  953. __le16 entry_count;
  954. u8 data_type0;
  955. u8 data_size0;
  956. u8 mac_addr[6];
  957. u8 data_type1;
  958. u8 data_size1;
  959. u8 mac_addr1[6];
  960. u8 data_type2;
  961. u8 data_size2;
  962. __le16 vlan_id;
  963. u8 data_type3;
  964. u8 data_size3;
  965. __le16 last;
  966. u8 reserved1[464];
  967. __le16 subsys_ven_id;
  968. __le16 subsys_dev_id;
  969. u8 reserved2[4];
  970. };
  971. union flash_params {
  972. struct flash_params_8012 flash_params_8012;
  973. struct flash_params_8000 flash_params_8000;
  974. };
  975. /*
  976. * doorbell space for the rx ring context
  977. */
  978. struct rx_doorbell_context {
  979. u32 cnsmr_idx; /* 0x00 */
  980. u32 valid; /* 0x04 */
  981. u32 reserved[4]; /* 0x08-0x14 */
  982. u32 lbq_prod_idx; /* 0x18 */
  983. u32 sbq_prod_idx; /* 0x1c */
  984. };
  985. /*
  986. * doorbell space for the tx ring context
  987. */
  988. struct tx_doorbell_context {
  989. u32 prod_idx; /* 0x00 */
  990. u32 valid; /* 0x04 */
  991. u32 reserved[4]; /* 0x08-0x14 */
  992. u32 lbq_prod_idx; /* 0x18 */
  993. u32 sbq_prod_idx; /* 0x1c */
  994. };
  995. /* DATA STRUCTURES SHARED WITH HARDWARE. */
  996. struct tx_buf_desc {
  997. __le64 addr;
  998. __le32 len;
  999. #define TX_DESC_LEN_MASK 0x000fffff
  1000. #define TX_DESC_C 0x40000000
  1001. #define TX_DESC_E 0x80000000
  1002. } __attribute((packed));
  1003. /*
  1004. * IOCB Definitions...
  1005. */
  1006. #define OPCODE_OB_MAC_IOCB 0x01
  1007. #define OPCODE_OB_MAC_TSO_IOCB 0x02
  1008. #define OPCODE_IB_MAC_IOCB 0x20
  1009. #define OPCODE_IB_MPI_IOCB 0x21
  1010. #define OPCODE_IB_AE_IOCB 0x3f
  1011. struct ob_mac_iocb_req {
  1012. u8 opcode;
  1013. u8 flags1;
  1014. #define OB_MAC_IOCB_REQ_OI 0x01
  1015. #define OB_MAC_IOCB_REQ_I 0x02
  1016. #define OB_MAC_IOCB_REQ_D 0x08
  1017. #define OB_MAC_IOCB_REQ_F 0x10
  1018. u8 flags2;
  1019. u8 flags3;
  1020. #define OB_MAC_IOCB_DFP 0x02
  1021. #define OB_MAC_IOCB_V 0x04
  1022. __le32 reserved1[2];
  1023. __le16 frame_len;
  1024. #define OB_MAC_IOCB_LEN_MASK 0x3ffff
  1025. __le16 reserved2;
  1026. u32 tid;
  1027. u32 txq_idx;
  1028. __le32 reserved3;
  1029. __le16 vlan_tci;
  1030. __le16 reserved4;
  1031. struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
  1032. } __attribute((packed));
  1033. struct ob_mac_iocb_rsp {
  1034. u8 opcode; /* */
  1035. u8 flags1; /* */
  1036. #define OB_MAC_IOCB_RSP_OI 0x01 /* */
  1037. #define OB_MAC_IOCB_RSP_I 0x02 /* */
  1038. #define OB_MAC_IOCB_RSP_E 0x08 /* */
  1039. #define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
  1040. #define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
  1041. #define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
  1042. u8 flags2; /* */
  1043. u8 flags3; /* */
  1044. #define OB_MAC_IOCB_RSP_B 0x80 /* */
  1045. u32 tid;
  1046. u32 txq_idx;
  1047. __le32 reserved[13];
  1048. } __attribute((packed));
  1049. struct ob_mac_tso_iocb_req {
  1050. u8 opcode;
  1051. u8 flags1;
  1052. #define OB_MAC_TSO_IOCB_OI 0x01
  1053. #define OB_MAC_TSO_IOCB_I 0x02
  1054. #define OB_MAC_TSO_IOCB_D 0x08
  1055. #define OB_MAC_TSO_IOCB_IP4 0x40
  1056. #define OB_MAC_TSO_IOCB_IP6 0x80
  1057. u8 flags2;
  1058. #define OB_MAC_TSO_IOCB_LSO 0x20
  1059. #define OB_MAC_TSO_IOCB_UC 0x40
  1060. #define OB_MAC_TSO_IOCB_TC 0x80
  1061. u8 flags3;
  1062. #define OB_MAC_TSO_IOCB_IC 0x01
  1063. #define OB_MAC_TSO_IOCB_DFP 0x02
  1064. #define OB_MAC_TSO_IOCB_V 0x04
  1065. __le32 reserved1[2];
  1066. __le32 frame_len;
  1067. u32 tid;
  1068. u32 txq_idx;
  1069. __le16 total_hdrs_len;
  1070. __le16 net_trans_offset;
  1071. #define OB_MAC_TRANSPORT_HDR_SHIFT 6
  1072. __le16 vlan_tci;
  1073. __le16 mss;
  1074. struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
  1075. } __attribute((packed));
  1076. struct ob_mac_tso_iocb_rsp {
  1077. u8 opcode;
  1078. u8 flags1;
  1079. #define OB_MAC_TSO_IOCB_RSP_OI 0x01
  1080. #define OB_MAC_TSO_IOCB_RSP_I 0x02
  1081. #define OB_MAC_TSO_IOCB_RSP_E 0x08
  1082. #define OB_MAC_TSO_IOCB_RSP_S 0x10
  1083. #define OB_MAC_TSO_IOCB_RSP_L 0x20
  1084. #define OB_MAC_TSO_IOCB_RSP_P 0x40
  1085. u8 flags2; /* */
  1086. u8 flags3; /* */
  1087. #define OB_MAC_TSO_IOCB_RSP_B 0x8000
  1088. u32 tid;
  1089. u32 txq_idx;
  1090. __le32 reserved2[13];
  1091. } __attribute((packed));
  1092. struct ib_mac_iocb_rsp {
  1093. u8 opcode; /* 0x20 */
  1094. u8 flags1;
  1095. #define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
  1096. #define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
  1097. #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
  1098. #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
  1099. #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
  1100. #define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
  1101. #define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
  1102. #define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
  1103. #define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
  1104. #define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
  1105. #define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
  1106. #define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
  1107. u8 flags2;
  1108. #define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
  1109. #define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
  1110. #define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
  1111. #define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
  1112. #define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
  1113. #define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
  1114. #define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
  1115. #define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
  1116. #define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
  1117. #define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
  1118. #define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
  1119. #define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
  1120. u8 flags3;
  1121. #define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
  1122. #define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
  1123. #define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
  1124. #define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
  1125. #define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
  1126. #define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
  1127. #define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
  1128. #define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
  1129. #define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
  1130. #define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
  1131. #define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
  1132. __le32 data_len; /* */
  1133. __le64 data_addr; /* */
  1134. __le32 rss; /* */
  1135. __le16 vlan_id; /* 12 bits */
  1136. #define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
  1137. #define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
  1138. #define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
  1139. __le16 reserved1;
  1140. __le32 reserved2[6];
  1141. u8 reserved3[3];
  1142. u8 flags4;
  1143. #define IB_MAC_IOCB_RSP_HV 0x20
  1144. #define IB_MAC_IOCB_RSP_HS 0x40
  1145. #define IB_MAC_IOCB_RSP_HL 0x80
  1146. __le32 hdr_len; /* */
  1147. __le64 hdr_addr; /* */
  1148. } __attribute((packed));
  1149. struct ib_ae_iocb_rsp {
  1150. u8 opcode;
  1151. u8 flags1;
  1152. #define IB_AE_IOCB_RSP_OI 0x01
  1153. #define IB_AE_IOCB_RSP_I 0x02
  1154. u8 event;
  1155. #define LINK_UP_EVENT 0x00
  1156. #define LINK_DOWN_EVENT 0x01
  1157. #define CAM_LOOKUP_ERR_EVENT 0x06
  1158. #define SOFT_ECC_ERROR_EVENT 0x07
  1159. #define MGMT_ERR_EVENT 0x08
  1160. #define TEN_GIG_MAC_EVENT 0x09
  1161. #define GPI0_H2L_EVENT 0x10
  1162. #define GPI0_L2H_EVENT 0x20
  1163. #define GPI1_H2L_EVENT 0x11
  1164. #define GPI1_L2H_EVENT 0x21
  1165. #define PCI_ERR_ANON_BUF_RD 0x40
  1166. u8 q_id;
  1167. __le32 reserved[15];
  1168. } __attribute((packed));
  1169. /*
  1170. * These three structures are for generic
  1171. * handling of ib and ob iocbs.
  1172. */
  1173. struct ql_net_rsp_iocb {
  1174. u8 opcode;
  1175. u8 flags0;
  1176. __le16 length;
  1177. __le32 tid;
  1178. __le32 reserved[14];
  1179. } __attribute((packed));
  1180. struct net_req_iocb {
  1181. u8 opcode;
  1182. u8 flags0;
  1183. __le16 flags1;
  1184. __le32 tid;
  1185. __le32 reserved1[30];
  1186. } __attribute((packed));
  1187. /*
  1188. * tx ring initialization control block for chip.
  1189. * It is defined as:
  1190. * "Work Queue Initialization Control Block"
  1191. */
  1192. struct wqicb {
  1193. __le16 len;
  1194. #define Q_LEN_V (1 << 4)
  1195. #define Q_LEN_CPP_CONT 0x0000
  1196. #define Q_LEN_CPP_16 0x0001
  1197. #define Q_LEN_CPP_32 0x0002
  1198. #define Q_LEN_CPP_64 0x0003
  1199. #define Q_LEN_CPP_512 0x0006
  1200. __le16 flags;
  1201. #define Q_PRI_SHIFT 1
  1202. #define Q_FLAGS_LC 0x1000
  1203. #define Q_FLAGS_LB 0x2000
  1204. #define Q_FLAGS_LI 0x4000
  1205. #define Q_FLAGS_LO 0x8000
  1206. __le16 cq_id_rss;
  1207. #define Q_CQ_ID_RSS_RV 0x8000
  1208. __le16 rid;
  1209. __le64 addr;
  1210. __le64 cnsmr_idx_addr;
  1211. } __attribute((packed));
  1212. /*
  1213. * rx ring initialization control block for chip.
  1214. * It is defined as:
  1215. * "Completion Queue Initialization Control Block"
  1216. */
  1217. struct cqicb {
  1218. u8 msix_vect;
  1219. u8 reserved1;
  1220. u8 reserved2;
  1221. u8 flags;
  1222. #define FLAGS_LV 0x08
  1223. #define FLAGS_LS 0x10
  1224. #define FLAGS_LL 0x20
  1225. #define FLAGS_LI 0x40
  1226. #define FLAGS_LC 0x80
  1227. __le16 len;
  1228. #define LEN_V (1 << 4)
  1229. #define LEN_CPP_CONT 0x0000
  1230. #define LEN_CPP_32 0x0001
  1231. #define LEN_CPP_64 0x0002
  1232. #define LEN_CPP_128 0x0003
  1233. __le16 rid;
  1234. __le64 addr;
  1235. __le64 prod_idx_addr;
  1236. __le16 pkt_delay;
  1237. __le16 irq_delay;
  1238. __le64 lbq_addr;
  1239. __le16 lbq_buf_size;
  1240. __le16 lbq_len; /* entry count */
  1241. __le64 sbq_addr;
  1242. __le16 sbq_buf_size;
  1243. __le16 sbq_len; /* entry count */
  1244. } __attribute((packed));
  1245. struct ricb {
  1246. u8 base_cq;
  1247. #define RSS_L4K 0x80
  1248. u8 flags;
  1249. #define RSS_L6K 0x01
  1250. #define RSS_LI 0x02
  1251. #define RSS_LB 0x04
  1252. #define RSS_LM 0x08
  1253. #define RSS_RI4 0x10
  1254. #define RSS_RT4 0x20
  1255. #define RSS_RI6 0x40
  1256. #define RSS_RT6 0x80
  1257. __le16 mask;
  1258. u8 hash_cq_id[1024];
  1259. __le32 ipv6_hash_key[10];
  1260. __le32 ipv4_hash_key[4];
  1261. } __attribute((packed));
  1262. /* SOFTWARE/DRIVER DATA STRUCTURES. */
  1263. struct oal {
  1264. struct tx_buf_desc oal[TX_DESC_PER_OAL];
  1265. };
  1266. struct map_list {
  1267. DECLARE_PCI_UNMAP_ADDR(mapaddr);
  1268. DECLARE_PCI_UNMAP_LEN(maplen);
  1269. };
  1270. struct tx_ring_desc {
  1271. struct sk_buff *skb;
  1272. struct ob_mac_iocb_req *queue_entry;
  1273. u32 index;
  1274. struct oal oal;
  1275. struct map_list map[MAX_SKB_FRAGS + 1];
  1276. int map_cnt;
  1277. struct tx_ring_desc *next;
  1278. };
  1279. struct page_chunk {
  1280. struct page *page; /* master page */
  1281. char *va; /* virt addr for this chunk */
  1282. u64 map; /* mapping for master */
  1283. unsigned int offset; /* offset for this chunk */
  1284. unsigned int last_flag; /* flag set for last chunk in page */
  1285. };
  1286. struct bq_desc {
  1287. union {
  1288. struct page_chunk pg_chunk;
  1289. struct sk_buff *skb;
  1290. } p;
  1291. __le64 *addr;
  1292. u32 index;
  1293. DECLARE_PCI_UNMAP_ADDR(mapaddr);
  1294. DECLARE_PCI_UNMAP_LEN(maplen);
  1295. };
  1296. #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
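/* Note (illustrative): QL_TXQ_IDX() simply spreads senders across the tx
 * rings by CPU number; with tx_ring_count == 8, a transmit on CPU 11 maps to
 * ring 11 % 8 == 3.  The skb argument is unused in this variant of the macro.
 */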
  1297. struct tx_ring {
  1298. /*
  1299. * queue info.
  1300. */
  1301. struct wqicb wqicb; /* structure used to inform chip of new queue */
  1302. void *wq_base; /* pci_alloc:virtual addr for tx */
  1303. dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
  1304. __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
  1305. dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
  1306. u32 wq_size; /* size in bytes of queue area */
  1307. u32 wq_len; /* number of entries in queue */
  1308. void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
  1309. void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
  1310. u16 prod_idx; /* current value for prod idx */
  1311. u16 cq_id; /* completion (rx) queue for tx completions */
  1312. u8 wq_id; /* queue id for this entry */
  1313. u8 reserved1[3];
  1314. struct tx_ring_desc *q; /* descriptor list for the queue */
  1315. spinlock_t lock;
  1316. atomic_t tx_count; /* counts down for every outstanding IO */
  1317. atomic_t queue_stopped; /* Turns queue off when full. */
  1318. struct delayed_work tx_work;
  1319. struct ql_adapter *qdev;
  1320. u64 tx_packets;
  1321. u64 tx_bytes;
  1322. u64 tx_errors;
  1323. };
  1324. /*
  1325. * Type of inbound queue.
  1326. */
  1327. enum {
  1328. DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
  1329. TX_Q = 3, /* Handles outbound completions. */
  1330. RX_Q = 4, /* Handles inbound completions. */
  1331. };
  1332. struct rx_ring {
  1333. struct cqicb cqicb; /* The chip's completion queue init control block. */
  1334. /* Completion queue elements. */
  1335. void *cq_base;
  1336. dma_addr_t cq_base_dma;
  1337. u32 cq_size;
  1338. u32 cq_len;
  1339. u16 cq_id;
  1340. __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
  1341. dma_addr_t prod_idx_sh_reg_dma;
  1342. void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
  1343. u32 cnsmr_idx; /* current sw idx */
  1344. struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
  1345. void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
  1346. /* Large buffer queue elements. */
  1347. u32 lbq_len; /* entry count */
  1348. u32 lbq_size; /* size in bytes of queue */
  1349. u32 lbq_buf_size;
  1350. void *lbq_base;
  1351. dma_addr_t lbq_base_dma;
  1352. void *lbq_base_indirect;
  1353. dma_addr_t lbq_base_indirect_dma;
  1354. struct page_chunk pg_chunk; /* current page for chunks */
  1355. struct bq_desc *lbq; /* array of control blocks */
  1356. void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
  1357. u32 lbq_prod_idx; /* current sw prod idx */
  1358. u32 lbq_curr_idx; /* next entry we expect */
  1359. u32 lbq_clean_idx; /* beginning of new descs */
  1360. u32 lbq_free_cnt; /* free buffer desc cnt */
  1361. /* Small buffer queue elements. */
  1362. u32 sbq_len; /* entry count */
  1363. u32 sbq_size; /* size in bytes of queue */
  1364. u32 sbq_buf_size;
  1365. void *sbq_base;
  1366. dma_addr_t sbq_base_dma;
  1367. void *sbq_base_indirect;
  1368. dma_addr_t sbq_base_indirect_dma;
  1369. struct bq_desc *sbq; /* array of control blocks */
  1370. void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
  1371. u32 sbq_prod_idx; /* current sw prod idx */
  1372. u32 sbq_curr_idx; /* next entry we expect */
  1373. u32 sbq_clean_idx; /* beginning of new descs */
  1374. u32 sbq_free_cnt; /* free buffer desc cnt */
  1375. /* Misc. handler elements. */
  1376. u32 type; /* Type of queue, tx, rx. */
  1377. u32 irq; /* Which vector this ring is assigned. */
  1378. u32 cpu; /* Which CPU this should run on. */
  1379. char name[IFNAMSIZ + 5];
  1380. struct napi_struct napi;
  1381. u8 reserved;
  1382. struct ql_adapter *qdev;
  1383. u64 rx_packets;
  1384. u64 rx_multicast;
  1385. u64 rx_bytes;
  1386. u64 rx_dropped;
  1387. u64 rx_errors;
  1388. };
/*
 * RSS Initialization Control Block
 */
struct hash_id {
	u8 value[4];
};

struct nic_stats {
	/*
	 * These stats come from offsets 200h to 278h
	 * in the XGMAC register space.
	 */
	u64 tx_pkts;
	u64 tx_bytes;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_ucast_pkts;
	u64 tx_ctl_pkts;
	u64 tx_pause_pkts;
	u64 tx_64_pkt;
	u64 tx_65_to_127_pkt;
	u64 tx_128_to_255_pkt;
	u64 tx_256_511_pkt;
	u64 tx_512_to_1023_pkt;
	u64 tx_1024_to_1518_pkt;
	u64 tx_1519_to_max_pkt;
	u64 tx_undersize_pkt;
	u64 tx_oversize_pkt;
	/*
	 * These stats come from offsets 300h to 3C8h
	 * in the XGMAC register space.
	 */
	u64 rx_bytes;
	u64 rx_bytes_ok;
	u64 rx_pkts;
	u64 rx_pkts_ok;
	u64 rx_bcast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_ucast_pkts;
	u64 rx_undersize_pkts;
	u64 rx_oversize_pkts;
	u64 rx_jabber_pkts;
	u64 rx_undersize_fcerr_pkts;
	u64 rx_drop_events;
	u64 rx_fcerr_pkts;
	u64 rx_align_err;
	u64 rx_symbol_err;
	u64 rx_mac_err;
	u64 rx_ctl_pkts;
	u64 rx_pause_pkts;
	u64 rx_64_pkts;
	u64 rx_65_to_127_pkts;
	u64 rx_128_255_pkts;
	u64 rx_256_511_pkts;
	u64 rx_512_to_1023_pkts;
	u64 rx_1024_to_1518_pkts;
	u64 rx_1519_to_max_pkts;
	u64 rx_len_err_pkts;
	/*
	 * These stats come from offsets 500h to 5C8h
	 * in the XGMAC register space.
	 */
	u64 tx_cbfc_pause_frames0;
	u64 tx_cbfc_pause_frames1;
	u64 tx_cbfc_pause_frames2;
	u64 tx_cbfc_pause_frames3;
	u64 tx_cbfc_pause_frames4;
	u64 tx_cbfc_pause_frames5;
	u64 tx_cbfc_pause_frames6;
	u64 tx_cbfc_pause_frames7;
	u64 rx_cbfc_pause_frames0;
	u64 rx_cbfc_pause_frames1;
	u64 rx_cbfc_pause_frames2;
	u64 rx_cbfc_pause_frames3;
	u64 rx_cbfc_pause_frames4;
	u64 rx_cbfc_pause_frames5;
	u64 rx_cbfc_pause_frames6;
	u64 rx_cbfc_pause_frames7;
	u64 rx_nic_fifo_drop;
};

/* Firmware coredump internal register address/length pairs. */
enum {
	MPI_CORE_REGS_ADDR = 0x00030000,
	MPI_CORE_REGS_CNT = 127,
	MPI_CORE_SH_REGS_CNT = 16,
	TEST_REGS_ADDR = 0x00001000,
	TEST_REGS_CNT = 23,
	RMII_REGS_ADDR = 0x00001040,
	RMII_REGS_CNT = 64,
	FCMAC1_REGS_ADDR = 0x00001080,
	FCMAC2_REGS_ADDR = 0x000010c0,
	FCMAC_REGS_CNT = 64,
	FC1_MBX_REGS_ADDR = 0x00001100,
	FC2_MBX_REGS_ADDR = 0x00001240,
	FC_MBX_REGS_CNT = 64,
	IDE_REGS_ADDR = 0x00001140,
	IDE_REGS_CNT = 64,
	NIC1_MBX_REGS_ADDR = 0x00001180,
	NIC2_MBX_REGS_ADDR = 0x00001280,
	NIC_MBX_REGS_CNT = 64,
	SMBUS_REGS_ADDR = 0x00001200,
	SMBUS_REGS_CNT = 64,
	I2C_REGS_ADDR = 0x00001fc0,
	I2C_REGS_CNT = 64,
	MEMC_REGS_ADDR = 0x00003000,
	MEMC_REGS_CNT = 256,
	PBUS_REGS_ADDR = 0x00007c00,
	PBUS_REGS_CNT = 256,
	MDE_REGS_ADDR = 0x00010000,
	MDE_REGS_CNT = 6,
	CODE_RAM_ADDR = 0x00020000,
	CODE_RAM_CNT = 0x2000,
	MEMC_RAM_ADDR = 0x00100000,
	MEMC_RAM_CNT = 0x2000,
};

#define MPI_COREDUMP_COOKIE 0x5555aaaa

struct mpi_coredump_global_header {
	u32 cookie;
	u8 idString[16];
	u32 timeLo;
	u32 timeHi;
	u32 imageSize;
	u32 headerSize;
	u8 info[220];
};

struct mpi_coredump_segment_header {
	u32 cookie;
	u32 segNum;
	u32 segSize;
	u32 extra;
	u8 description[16];
};
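
/*
 * Illustrative sketch (an assumption about how the coredump code fills
 * these headers; the helper name is hypothetical): each segment is tagged
 * with the coredump cookie, its segment number, and its size so a parser
 * can walk the image segment by segment.
 */
static inline void ql_example_init_seg_header(
	struct mpi_coredump_segment_header *hdr,
	u32 seg_num, u32 seg_size, const char *desc)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->cookie = MPI_COREDUMP_COOKIE;
	hdr->segNum = seg_num;
	hdr->segSize = seg_size;
	strncpy((char *)hdr->description, desc, sizeof(hdr->description) - 1);
}
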
/* Firmware coredump header segment numbers. */
enum {
	CORE_SEG_NUM = 1,
	TEST_LOGIC_SEG_NUM = 2,
	RMII_SEG_NUM = 3,
	FCMAC1_SEG_NUM = 4,
	FCMAC2_SEG_NUM = 5,
	FC1_MBOX_SEG_NUM = 6,
	IDE_SEG_NUM = 7,
	NIC1_MBOX_SEG_NUM = 8,
	SMBUS_SEG_NUM = 9,
	FC2_MBOX_SEG_NUM = 10,
	NIC2_MBOX_SEG_NUM = 11,
	I2C_SEG_NUM = 12,
	MEMC_SEG_NUM = 13,
	PBUS_SEG_NUM = 14,
	MDE_SEG_NUM = 15,
	NIC1_CONTROL_SEG_NUM = 16,
	NIC2_CONTROL_SEG_NUM = 17,
	NIC1_XGMAC_SEG_NUM = 18,
	NIC2_XGMAC_SEG_NUM = 19,
	WCS_RAM_SEG_NUM = 20,
	MEMC_RAM_SEG_NUM = 21,
	XAUI_AN_SEG_NUM = 22,
	XAUI_HSS_PCS_SEG_NUM = 23,
	XFI_AN_SEG_NUM = 24,
	XFI_TRAIN_SEG_NUM = 25,
	XFI_HSS_PCS_SEG_NUM = 26,
	XFI_HSS_TX_SEG_NUM = 27,
	XFI_HSS_RX_SEG_NUM = 28,
	XFI_HSS_PLL_SEG_NUM = 29,
	MISC_NIC_INFO_SEG_NUM = 30,
	INTR_STATES_SEG_NUM = 31,
	CAM_ENTRIES_SEG_NUM = 32,
	ROUTING_WORDS_SEG_NUM = 33,
	ETS_SEG_NUM = 34,
	PROBE_DUMP_SEG_NUM = 35,
	ROUTING_INDEX_SEG_NUM = 36,
	MAC_PROTOCOL_SEG_NUM = 37,
	XAUI2_AN_SEG_NUM = 38,
	XAUI2_HSS_PCS_SEG_NUM = 39,
	XFI2_AN_SEG_NUM = 40,
	XFI2_TRAIN_SEG_NUM = 41,
	XFI2_HSS_PCS_SEG_NUM = 42,
	XFI2_HSS_TX_SEG_NUM = 43,
	XFI2_HSS_RX_SEG_NUM = 44,
	XFI2_HSS_PLL_SEG_NUM = 45,
	SEM_REGS_SEG_NUM = 50
};

/* There are 64 generic NIC registers. */
#define NIC_REGS_DUMP_WORD_COUNT 64

/* XGMAC word count. */
#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)

/* Word counts for the SERDES blocks. */
#define XG_SERDES_XAUI_AN_COUNT 14
#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
#define XG_SERDES_XFI_AN_COUNT 14
#define XG_SERDES_XFI_TRAIN_COUNT 12
#define XG_SERDES_XFI_HSS_PCS_COUNT 15
#define XG_SERDES_XFI_HSS_TX_COUNT 32
#define XG_SERDES_XFI_HSS_RX_COUNT 32
#define XG_SERDES_XFI_HSS_PLL_COUNT 32

/* There are 2 CNA ETS and 8 NIC ETS registers. */
#define ETS_REGS_DUMP_WORD_COUNT 10

/* Each probe mux entry stores the probe type plus 64 entries
 * that are each 64 bits in length. There are a total of
 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
 */
#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
			       PRB_MX_ADDR_VALID_TOTAL)

/* Each routing entry consists of 4 32-bit words.
 * They are route type, index, index word, and result.
 * There are 2 route blocks with 8 entries each and
 * 2 NIC blocks with 16 entries each.
 * The total is 48 entries of 4 words each.
 */
#define RT_IDX_DUMP_ENTRIES 48
#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
			       RT_IDX_DUMP_WORDS_PER_ENTRY)

/* There are 10 address blocks in the filter, each with
 * a different entry count and a different word count per entry.
 */
#define MAC_ADDR_DUMP_ENTRIES \
	((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
	 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
	 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
	 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
	 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
	 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
	 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
	 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
	 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
	 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
				 MAC_ADDR_DUMP_WORDS_PER_ENTRY)

/* Maximum of 4 functions whose semaphore registers are
 * in the coredump.
 */
#define MAX_SEMAPHORE_FUNCTIONS 4

/* Defines for accessing the MPI shadow registers. */
#define RISC_124 0x0003007c
#define RISC_127 0x0003007f
#define SHADOW_OFFSET 0xb0000000
#define SHADOW_REG_SHIFT 20
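
/*
 * Illustrative sketch (assumption): the defines above suggest that a shadow
 * register is selected by writing SHADOW_OFFSET OR'd with the shifted
 * register index to RISC register 124 and then reading the value back
 * through RISC register 127. Only the selector arithmetic is shown here.
 */
static inline u32 ql_example_shadow_reg_select(u32 index)
{
	return SHADOW_OFFSET | (index << SHADOW_REG_SHIFT);
}
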
struct ql_nic_misc {
	u32 rx_ring_count;
	u32 tx_ring_count;
	u32 intr_count;
	u32 function;
};

struct ql_reg_dump {
	/* segment 0 */
	struct mpi_coredump_global_header mpi_global_header;
	/* segment 16 */
	struct mpi_coredump_segment_header nic_regs_seg_hdr;
	u32 nic_regs[64];
	/* segment 30 */
	struct mpi_coredump_segment_header misc_nic_seg_hdr;
	struct ql_nic_misc misc_nic_info;
	/* segment 31 */
	/* one interrupt state for each CQ */
	struct mpi_coredump_segment_header intr_states_seg_hdr;
	u32 intr_states[MAX_CPUS];
	/* segment 32 */
	/* 3 cam words each for 16 unicast,
	 * 2 cam words for each of 32 multicast.
	 */
	struct mpi_coredump_segment_header cam_entries_seg_hdr;
	u32 cam_entries[(16 * 3) + (32 * 3)];
	/* segment 33 */
	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
	u32 nic_routing_words[16];
	/* segment 34 */
	struct mpi_coredump_segment_header ets_seg_hdr;
	u32 ets[8 + 2];
};

struct ql_mpi_coredump {
	/* segment 0 */
	struct mpi_coredump_global_header mpi_global_header;
	/* segment 1 */
	struct mpi_coredump_segment_header core_regs_seg_hdr;
	u32 mpi_core_regs[MPI_CORE_REGS_CNT];
	u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
	/* segment 2 */
	struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
	u32 test_logic_regs[TEST_REGS_CNT];
	/* segment 3 */
	struct mpi_coredump_segment_header rmii_regs_seg_hdr;
	u32 rmii_regs[RMII_REGS_CNT];
	/* segment 4 */
	struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
	u32 fcmac1_regs[FCMAC_REGS_CNT];
	/* segment 5 */
	struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
	u32 fcmac2_regs[FCMAC_REGS_CNT];
	/* segment 6 */
	struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
	u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
	/* segment 7 */
	struct mpi_coredump_segment_header ide_regs_seg_hdr;
	u32 ide_regs[IDE_REGS_CNT];
	/* segment 8 */
	struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
	u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
	/* segment 9 */
	struct mpi_coredump_segment_header smbus_regs_seg_hdr;
	u32 smbus_regs[SMBUS_REGS_CNT];
	/* segment 10 */
	struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
	u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
	/* segment 11 */
	struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
	u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
	/* segment 12 */
	struct mpi_coredump_segment_header i2c_regs_seg_hdr;
	u32 i2c_regs[I2C_REGS_CNT];
	/* segment 13 */
	struct mpi_coredump_segment_header memc_regs_seg_hdr;
	u32 memc_regs[MEMC_REGS_CNT];
	/* segment 14 */
	struct mpi_coredump_segment_header pbus_regs_seg_hdr;
	u32 pbus_regs[PBUS_REGS_CNT];
	/* segment 15 */
	struct mpi_coredump_segment_header mde_regs_seg_hdr;
	u32 mde_regs[MDE_REGS_CNT];
	/* segment 16 */
	struct mpi_coredump_segment_header nic_regs_seg_hdr;
	u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
	/* segment 17 */
	struct mpi_coredump_segment_header nic2_regs_seg_hdr;
	u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
	/* segment 18 */
	struct mpi_coredump_segment_header xgmac1_seg_hdr;
	u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
	/* segment 19 */
	struct mpi_coredump_segment_header xgmac2_seg_hdr;
	u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
	/* segment 20 */
	struct mpi_coredump_segment_header code_ram_seg_hdr;
	u32 code_ram[CODE_RAM_CNT];
	/* segment 21 */
	struct mpi_coredump_segment_header memc_ram_seg_hdr;
	u32 memc_ram[MEMC_RAM_CNT];
	/* segment 22 */
	struct mpi_coredump_segment_header xaui_an_hdr;
	u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
	/* segment 23 */
	struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
	u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
	/* segment 24 */
	struct mpi_coredump_segment_header xfi_an_hdr;
	u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
	/* segment 25 */
	struct mpi_coredump_segment_header xfi_train_hdr;
	u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
	/* segment 26 */
	struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
	u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
	/* segment 27 */
	struct mpi_coredump_segment_header xfi_hss_tx_hdr;
	u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
	/* segment 28 */
	struct mpi_coredump_segment_header xfi_hss_rx_hdr;
	u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
	/* segment 29 */
	struct mpi_coredump_segment_header xfi_hss_pll_hdr;
	u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
	/* segment 30 */
	struct mpi_coredump_segment_header misc_nic_seg_hdr;
	struct ql_nic_misc misc_nic_info;
	/* segment 31 */
	/* one interrupt state for each CQ */
	struct mpi_coredump_segment_header intr_states_seg_hdr;
	u32 intr_states[MAX_RX_RINGS];
	/* segment 32 */
	/* 3 cam words each for 16 unicast,
	 * 2 cam words for each of 32 multicast.
	 */
	struct mpi_coredump_segment_header cam_entries_seg_hdr;
	u32 cam_entries[(16 * 3) + (32 * 3)];
	/* segment 33 */
	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
	u32 nic_routing_words[16];
	/* segment 34 */
	struct mpi_coredump_segment_header ets_seg_hdr;
	u32 ets[ETS_REGS_DUMP_WORD_COUNT];
	/* segment 35 */
	struct mpi_coredump_segment_header probe_dump_seg_hdr;
	u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
	/* segment 36 */
	struct mpi_coredump_segment_header routing_reg_seg_hdr;
	u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
	/* segment 37 */
	struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
	u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
	/* segment 38 */
	struct mpi_coredump_segment_header xaui2_an_hdr;
	u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
	/* segment 39 */
	struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
	u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
	/* segment 40 */
	struct mpi_coredump_segment_header xfi2_an_hdr;
	u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
	/* segment 41 */
	struct mpi_coredump_segment_header xfi2_train_hdr;
	u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
	/* segment 42 */
	struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
	u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
	/* segment 43 */
	struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
	u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
	/* segment 44 */
	struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
	u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
	/* segment 45 */
	struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
	u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
	/* segment 50 */
	/* semaphore registers for all MAX_SEMAPHORE_FUNCTIONS functions */
	struct mpi_coredump_segment_header sem_regs_seg_hdr;
	u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
};
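
/*
 * Illustrative sketch (assumption; field usage inferred from the layout
 * above, not taken from the driver): the global header would carry the same
 * cookie plus the overall image and header sizes so a consumer can validate
 * a dump before walking its segments.
 */
static inline void ql_example_init_global_header(
	struct mpi_coredump_global_header *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->cookie = MPI_COREDUMP_COOKIE;
	hdr->headerSize = sizeof(*hdr);
	hdr->imageSize = sizeof(struct ql_mpi_coredump);
}
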
/*
 * The intr_context structure is used during initialization
 * to hook the interrupts. It is also used in a single-IRQ
 * environment as the context for the ISR.
 */
struct intr_context {
	struct ql_adapter *qdev;
	u32 intr;
	u32 irq_mask;		/* Mask of which rings the vector services. */
	u32 hooked;
	u32 intr_en_mask;	/* value/mask used to enable this intr */
	u32 intr_dis_mask;	/* value/mask used to disable this intr */
	u32 intr_read_mask;	/* value/mask used to read this intr */
	char name[IFNAMSIZ * 2];
	atomic_t irq_cnt;	/* irq_cnt is used in a single-vector
				 * environment. It is incremented for each
				 * irq handler that is scheduled. When each
				 * handler finishes, it decrements irq_cnt
				 * and re-enables interrupts once it reaches
				 * zero.
				 */
	irq_handler_t handler;
};
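
/*
 * Illustrative sketch (assumption based on the irq_cnt comment above, not a
 * driver helper): in a single-vector environment the interrupt would only
 * be re-enabled once the last scheduled handler has finished, i.e. when
 * irq_cnt drops back to zero.
 */
static inline int ql_example_last_handler_done(struct intr_context *ctx)
{
	return atomic_dec_and_test(&ctx->irq_cnt);
}
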
/* adapter flags definitions. */
enum {
	QL_ADAPTER_UP = 0,	/* Adapter has been brought up. */
	QL_LEGACY_ENABLED = 1,
	QL_MSI_ENABLED = 2,
	QL_MSIX_ENABLED = 3,
	QL_DMA64 = 4,
	QL_PROMISCUOUS = 5,
	QL_ALLMULTI = 6,
	QL_PORT_CFG = 7,
	QL_CAM_RT_SET = 8,
	QL_SELFTEST = 9,
	QL_LB_LINK_UP = 10,
	QL_FRC_COREDUMP = 11,
	QL_EEH_FATAL = 12,
};

/* link_status bit definitions */
enum {
	STS_LOOPBACK_MASK = 0x00000700,
	STS_LOOPBACK_PCS = 0x00000100,
	STS_LOOPBACK_HSS = 0x00000200,
	STS_LOOPBACK_EXT = 0x00000300,
	STS_PAUSE_MASK = 0x000000c0,
	STS_PAUSE_STD = 0x00000040,
	STS_PAUSE_PRI = 0x00000080,
	STS_SPEED_MASK = 0x00000038,
	STS_SPEED_100Mb = 0x00000000,
	STS_SPEED_1Gb = 0x00000008,
	STS_SPEED_10Gb = 0x00000010,
	STS_LINK_TYPE_MASK = 0x00000007,
	STS_LINK_TYPE_XFI = 0x00000001,
	STS_LINK_TYPE_XAUI = 0x00000002,
	STS_LINK_TYPE_XFI_BP = 0x00000003,
	STS_LINK_TYPE_XAUI_BP = 0x00000004,
	STS_LINK_TYPE_10GBASET = 0x00000005,
};
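
/*
 * Illustrative sketch (assumption: result expressed in Mb/s): decoding the
 * negotiated speed from the link_status word using the masks above.
 */
static inline u32 ql_example_link_speed_mbps(u32 link_status)
{
	switch (link_status & STS_SPEED_MASK) {
	case STS_SPEED_10Gb:
		return 10000;
	case STS_SPEED_1Gb:
		return 1000;
	case STS_SPEED_100Mb:
		return 100;
	default:
		return 0;
	}
}
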
/* link_config bit definitions */
enum {
	CFG_JUMBO_FRAME_SIZE = 0x00010000,
	CFG_PAUSE_MASK = 0x00000060,
	CFG_PAUSE_STD = 0x00000020,
	CFG_PAUSE_PRI = 0x00000040,
	CFG_DCBX = 0x00000010,
	CFG_LOOPBACK_MASK = 0x00000007,
	CFG_LOOPBACK_PCS = 0x00000002,
	CFG_LOOPBACK_HSS = 0x00000004,
	CFG_LOOPBACK_EXT = 0x00000006,
	CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
};
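
/*
 * Illustrative sketch (assumption: a zero loopback field means loopback is
 * not requested): testing whether any loopback mode is set in link_config.
 */
static inline int ql_example_loopback_requested(u32 link_config)
{
	return (link_config & CFG_LOOPBACK_MASK) != 0;
}
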
struct nic_operations {
	int (*get_flash) (struct ql_adapter *);
	int (*port_initialize) (struct ql_adapter *);
};

/*
 * The main adapter structure definition.
 * This structure has all fields relevant to the hardware.
 */
struct ql_adapter {
	struct ricb ricb;
	unsigned long flags;
	u32 wol;
	struct nic_stats nic_stats;
	struct vlan_group *vlgrp;
	/* PCI Configuration information for this device */
	struct pci_dev *pdev;
	struct net_device *ndev;	/* Parent NET device */
	/* Hardware information */
	u32 chip_rev_id;
	u32 fw_rev_id;
	u32 func;		/* PCI function for this adapter */
	u32 alt_func;		/* PCI function for alternate adapter */
	u32 port;		/* Port number for this adapter */
	spinlock_t adapter_lock;
	spinlock_t hw_lock;
	spinlock_t stats_lock;
	/* PCI Bus Relative Register Addresses */
	void __iomem *reg_base;
	void __iomem *doorbell_area;
	u32 doorbell_area_size;
	u32 msg_enable;
	/* Page for Shadow Registers */
	void *rx_ring_shadow_reg_area;
	dma_addr_t rx_ring_shadow_reg_dma;
	void *tx_ring_shadow_reg_area;
	dma_addr_t tx_ring_shadow_reg_dma;
	u32 mailbox_in;
	u32 mailbox_out;
	struct mbox_params idc_mbc;
	int tx_ring_size;
	int rx_ring_size;
	u32 intr_count;
	struct msix_entry *msi_x_entry;
	struct intr_context intr_context[MAX_RX_RINGS];
	int tx_ring_count;	/* One per online CPU. */
	u32 rss_ring_count;	/* One per irq vector. */
	/*
	 * rx_ring_count =
	 *	(CPU count * outbound completion rx_ring) +
	 *	(irq_vector_cnt * inbound (RSS) completion rx_ring)
	 */
	int rx_ring_count;
	int ring_mem_size;
	void *ring_mem;
	struct rx_ring rx_ring[MAX_RX_RINGS];
	struct tx_ring tx_ring[MAX_TX_RINGS];
	unsigned int lbq_buf_order;
	int rx_csum;
	u32 default_rx_queue;
	u16 rx_coalesce_usecs;		/* cqicb->int_delay */
	u16 rx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
	u16 tx_coalesce_usecs;		/* cqicb->int_delay */
	u16 tx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
	u32 xg_sem_mask;
	u32 port_link_up;
	u32 port_init;
	u32 link_status;
	struct ql_mpi_coredump *mpi_coredump;
	u32 core_is_dumped;
	u32 link_config;
	u32 led_config;
	u32 max_frame_size;
	union flash_params flash;
	struct workqueue_struct *workqueue;
	struct delayed_work asic_reset_work;
	struct delayed_work mpi_reset_work;
	struct delayed_work mpi_work;
	struct delayed_work mpi_port_cfg_work;
	struct delayed_work mpi_idc_work;
	struct delayed_work mpi_core_to_log;
	struct completion ide_completion;
	struct nic_operations *nic_ops;
	u16 device_id;
	struct timer_list timer;
	atomic_t lb_count;
};
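
/*
 * Illustrative sketch (assumption: the adapter flag values defined earlier
 * are bit numbers within ql_adapter->flags, so they would be used with the
 * kernel bit helpers).
 */
static inline int ql_example_adapter_is_up(struct ql_adapter *qdev)
{
	return test_bit(QL_ADAPTER_UP, &qdev->flags);
}
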
/*
 * Typical register accessor for a memory-mapped device.
 */
static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
{
	return readl(qdev->reg_base + reg);
}

/*
 * Typical register accessor for a memory-mapped device.
 */
static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
{
	writel(val, qdev->reg_base + reg);
}
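
/*
 * Illustrative usage sketch: a plain read-modify-write built on the two
 * accessors above. (Assumption: some registers on this chip may use a
 * mask-in-upper-bits write convention instead, so treat this purely as an
 * example of composing ql_read32() and ql_write32().)
 */
static inline void ql_example_set_bits32(const struct ql_adapter *qdev,
					 int reg, u32 bits)
{
	ql_write32(qdev, reg, ql_read32(qdev, reg) | bits);
}
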
/*
 * Doorbell Registers:
 * Doorbell registers are virtual registers in the PCI memory space.
 * The space is allocated by the chip during PCI initialization. The
 * device driver finds the doorbell address in BAR 3 in PCI config space.
 * The registers are used to control outbound and inbound queues, for
 * example, to advance the producer index of an outbound queue. Each
 * queue uses one 4 KB chunk of memory. The lower half of the space is
 * for outbound queues; the upper half is for inbound queues.
 */
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
	writel(val, addr);
	mmiowb();
}
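
/*
 * Illustrative sketch (assumption about typical use, not a driver helper):
 * after refilling large receive buffers, the new software producer index
 * would be published to the chip through the queue's doorbell.
 */
static inline void ql_example_ring_lbq_doorbell(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->lbq_prod_idx, rx_ring->lbq_prod_idx_db_reg);
}
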
/*
 * Shadow Registers:
 * Outbound queues have a consumer index that is maintained by the chip.
 * Inbound queues have a producer index that is maintained by the chip.
 * For lower overhead, these registers are "shadowed" to host memory
 * which allows the device driver to track the queue progress without
 * PCI reads. When an entry is placed on an inbound queue, the chip will
 * update the relevant index register and then copy the value to the
 * shadow register in host memory.
 */
static inline u32 ql_read_sh_reg(__le32 *addr)
{
	u32 reg;

	reg = le32_to_cpu(*addr);
	rmb();
	return reg;
}
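
/*
 * Illustrative sketch: combining the shadowed producer index with the
 * software consumer index to decide whether a completion queue has work
 * pending, with no PCI read involved. (Assumption: the driver's actual
 * poll loop may differ in detail.)
 */
static inline int ql_example_cq_has_work(struct rx_ring *rx_ring)
{
	return ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx;
}
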
extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;

extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			       u32 *value);
extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
			u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
void ql_mpi_idc_work(struct work_struct *work);
void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
			  u32 ram_addr, int word_count);
int ql_core_dump(struct ql_adapter *qdev,
		 struct ql_mpi_coredump *mpi_coredump);
int ql_mb_sys_err(struct ql_adapter *qdev);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
int ql_mb_get_led_cfg(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_gen_reg_dump(struct ql_adapter *qdev,
		     struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);

#if 1
#define QL_ALL_DUMP
#define QL_REG_DUMP
#define QL_DEV_DUMP
#define QL_CB_DUMP
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
#endif

#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
extern void ql_dump_routing_entries(struct ql_adapter *qdev);
extern void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
#else
#define QL_DUMP_REGS(qdev)
#define QL_DUMP_ROUTE(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
#endif

#ifdef QL_STAT_DUMP
extern void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif

#ifdef QL_DEV_DUMP
extern void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif

#ifdef QL_CB_DUMP
extern void ql_dump_wqicb(struct wqicb *wqicb);
extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
extern void ql_dump_ricb(struct ricb *ricb);
extern void ql_dump_cqicb(struct cqicb *cqicb);
extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
		ql_dump_hw_cb(qdev, size, bit, q_id)
#else
#define QL_DUMP_RICB(ricb)
#define QL_DUMP_WQICB(wqicb)
#define QL_DUMP_TX_RING(tx_ring)
#define QL_DUMP_CQICB(cqicb)
#define QL_DUMP_RX_RING(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
#endif

#ifdef QL_OB_DUMP
extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
#endif

#ifdef QL_IB_DUMP
extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif

#ifdef QL_ALL_DUMP
extern void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
#endif

#endif /* _QLGE_H_ */