qlcnic_hw.c

  1. /*
  2. * QLogic qlcnic NIC Driver
  3. * Copyright (c) 2009-2010 QLogic Corporation
  4. *
  5. * See LICENSE.qlcnic for copyright and licensing details.
  6. */
  7. #include "qlcnic.h"
  8. #include <linux/slab.h>
  9. #include <net/ip.h>
  10. #include <linux/bitops.h>
  11. #define MASK(n) ((1ULL<<(n))-1)
  12. #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
  13. #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
  14. #define CRB_BLK(off) ((off >> 20) & 0x3f)
  15. #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
  16. #define CRB_WINDOW_2M (0x130060)
  17. #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
  18. #define CRB_INDIRECT_2M (0x1e0000UL)
  19. #ifndef readq
  20. static inline u64 readq(void __iomem *addr)
  21. {
  22. return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
  23. }
  24. #endif
  25. #ifndef writeq
  26. static inline void writeq(u64 val, void __iomem *addr)
  27. {
  28. writel(((u32) (val)), (addr));
  29. writel(((u32) (val >> 32)), (addr + 4));
  30. }
  31. #endif
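/*
 * crb_128M_2M_map[] translates CRB offsets in the legacy 128MB PCI map to
 * offsets in the 2MB BAR.  CRB_BLK() selects a 1MB block and CRB_SUBBLK()
 * a 64KB sub-block (see the macros above); each sub-block entry records
 * {valid, start_128M, end_128M, start_2M} for a directly mapped range.
 * qlcnic_pci_get_crb_addr_2M() below consumes this table.
 */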
  32. static const struct crb_128M_2M_block_map
  33. crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
  34. {{{0, 0, 0, 0} } }, /* 0: PCI */
  35. {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
  36. {1, 0x0110000, 0x0120000, 0x130000},
  37. {1, 0x0120000, 0x0122000, 0x124000},
  38. {1, 0x0130000, 0x0132000, 0x126000},
  39. {1, 0x0140000, 0x0142000, 0x128000},
  40. {1, 0x0150000, 0x0152000, 0x12a000},
  41. {1, 0x0160000, 0x0170000, 0x110000},
  42. {1, 0x0170000, 0x0172000, 0x12e000},
  43. {0, 0x0000000, 0x0000000, 0x000000},
  44. {0, 0x0000000, 0x0000000, 0x000000},
  45. {0, 0x0000000, 0x0000000, 0x000000},
  46. {0, 0x0000000, 0x0000000, 0x000000},
  47. {0, 0x0000000, 0x0000000, 0x000000},
  48. {0, 0x0000000, 0x0000000, 0x000000},
  49. {1, 0x01e0000, 0x01e0800, 0x122000},
  50. {0, 0x0000000, 0x0000000, 0x000000} } },
  51. {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
  52. {{{0, 0, 0, 0} } }, /* 3: */
  53. {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
  54. {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
  55. {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
  56. {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
  57. {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
  58. {0, 0x0000000, 0x0000000, 0x000000},
  59. {0, 0x0000000, 0x0000000, 0x000000},
  60. {0, 0x0000000, 0x0000000, 0x000000},
  61. {0, 0x0000000, 0x0000000, 0x000000},
  62. {0, 0x0000000, 0x0000000, 0x000000},
  63. {0, 0x0000000, 0x0000000, 0x000000},
  64. {0, 0x0000000, 0x0000000, 0x000000},
  65. {0, 0x0000000, 0x0000000, 0x000000},
  66. {0, 0x0000000, 0x0000000, 0x000000},
  67. {0, 0x0000000, 0x0000000, 0x000000},
  68. {0, 0x0000000, 0x0000000, 0x000000},
  69. {0, 0x0000000, 0x0000000, 0x000000},
  70. {0, 0x0000000, 0x0000000, 0x000000},
  71. {0, 0x0000000, 0x0000000, 0x000000},
  72. {1, 0x08f0000, 0x08f2000, 0x172000} } },
  73. {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
  74. {0, 0x0000000, 0x0000000, 0x000000},
  75. {0, 0x0000000, 0x0000000, 0x000000},
  76. {0, 0x0000000, 0x0000000, 0x000000},
  77. {0, 0x0000000, 0x0000000, 0x000000},
  78. {0, 0x0000000, 0x0000000, 0x000000},
  79. {0, 0x0000000, 0x0000000, 0x000000},
  80. {0, 0x0000000, 0x0000000, 0x000000},
  81. {0, 0x0000000, 0x0000000, 0x000000},
  82. {0, 0x0000000, 0x0000000, 0x000000},
  83. {0, 0x0000000, 0x0000000, 0x000000},
  84. {0, 0x0000000, 0x0000000, 0x000000},
  85. {0, 0x0000000, 0x0000000, 0x000000},
  86. {0, 0x0000000, 0x0000000, 0x000000},
  87. {0, 0x0000000, 0x0000000, 0x000000},
  88. {1, 0x09f0000, 0x09f2000, 0x176000} } },
  89. {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
  90. {0, 0x0000000, 0x0000000, 0x000000},
  91. {0, 0x0000000, 0x0000000, 0x000000},
  92. {0, 0x0000000, 0x0000000, 0x000000},
  93. {0, 0x0000000, 0x0000000, 0x000000},
  94. {0, 0x0000000, 0x0000000, 0x000000},
  95. {0, 0x0000000, 0x0000000, 0x000000},
  96. {0, 0x0000000, 0x0000000, 0x000000},
  97. {0, 0x0000000, 0x0000000, 0x000000},
  98. {0, 0x0000000, 0x0000000, 0x000000},
  99. {0, 0x0000000, 0x0000000, 0x000000},
  100. {0, 0x0000000, 0x0000000, 0x000000},
  101. {0, 0x0000000, 0x0000000, 0x000000},
  102. {0, 0x0000000, 0x0000000, 0x000000},
  103. {0, 0x0000000, 0x0000000, 0x000000},
  104. {1, 0x0af0000, 0x0af2000, 0x17a000} } },
  105. {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
  106. {0, 0x0000000, 0x0000000, 0x000000},
  107. {0, 0x0000000, 0x0000000, 0x000000},
  108. {0, 0x0000000, 0x0000000, 0x000000},
  109. {0, 0x0000000, 0x0000000, 0x000000},
  110. {0, 0x0000000, 0x0000000, 0x000000},
  111. {0, 0x0000000, 0x0000000, 0x000000},
  112. {0, 0x0000000, 0x0000000, 0x000000},
  113. {0, 0x0000000, 0x0000000, 0x000000},
  114. {0, 0x0000000, 0x0000000, 0x000000},
  115. {0, 0x0000000, 0x0000000, 0x000000},
  116. {0, 0x0000000, 0x0000000, 0x000000},
  117. {0, 0x0000000, 0x0000000, 0x000000},
  118. {0, 0x0000000, 0x0000000, 0x000000},
  119. {0, 0x0000000, 0x0000000, 0x000000},
  120. {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
  121. {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
  122. {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
  123. {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
  124. {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
  125. {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
  126. {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
  127. {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
  128. {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
  129. {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
  130. {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
  131. {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
  132. {{{0, 0, 0, 0} } }, /* 23: */
  133. {{{0, 0, 0, 0} } }, /* 24: */
  134. {{{0, 0, 0, 0} } }, /* 25: */
  135. {{{0, 0, 0, 0} } }, /* 26: */
  136. {{{0, 0, 0, 0} } }, /* 27: */
  137. {{{0, 0, 0, 0} } }, /* 28: */
  138. {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
  139. {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
  140. {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
  141. {{{0} } }, /* 32: PCI */
  142. {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
  143. {1, 0x2110000, 0x2120000, 0x130000},
  144. {1, 0x2120000, 0x2122000, 0x124000},
  145. {1, 0x2130000, 0x2132000, 0x126000},
  146. {1, 0x2140000, 0x2142000, 0x128000},
  147. {1, 0x2150000, 0x2152000, 0x12a000},
  148. {1, 0x2160000, 0x2170000, 0x110000},
  149. {1, 0x2170000, 0x2172000, 0x12e000},
  150. {0, 0x0000000, 0x0000000, 0x000000},
  151. {0, 0x0000000, 0x0000000, 0x000000},
  152. {0, 0x0000000, 0x0000000, 0x000000},
  153. {0, 0x0000000, 0x0000000, 0x000000},
  154. {0, 0x0000000, 0x0000000, 0x000000},
  155. {0, 0x0000000, 0x0000000, 0x000000},
  156. {0, 0x0000000, 0x0000000, 0x000000},
  157. {0, 0x0000000, 0x0000000, 0x000000} } },
  158. {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
  159. {{{0} } }, /* 35: */
  160. {{{0} } }, /* 36: */
  161. {{{0} } }, /* 37: */
  162. {{{0} } }, /* 38: */
  163. {{{0} } }, /* 39: */
  164. {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
  165. {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
  166. {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
  167. {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
  168. {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
  169. {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
  170. {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
  171. {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
  172. {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
  173. {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
  174. {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
  175. {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
  176. {{{0} } }, /* 52: */
  177. {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
  178. {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
  179. {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
  180. {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
  181. {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
  182. {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
  183. {{{0} } }, /* 59: I2C0 */
  184. {{{0} } }, /* 60: I2C1 */
  185. {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
  186. {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
  187. {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
  188. };
  189. /*
  190. * top 12 bits of crb internal address (hub, agent)
  191. */
  192. static const unsigned crb_hub_agt[64] = {
  193. 0,
  194. QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
  195. QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
  196. QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
  197. 0,
  198. QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
  199. QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
  200. QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
  201. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
  202. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
  203. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
  204. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
  205. QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
  206. QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
  207. QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
  208. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
  209. QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
  210. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
  211. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
  212. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
  213. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
  214. QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
  215. QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
  216. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
  217. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
  218. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
  219. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
  220. 0,
  221. QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
  222. QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
  223. 0,
  224. QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
  225. 0,
  226. QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
  227. QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
  228. 0,
  229. 0,
  230. 0,
  231. 0,
  232. 0,
  233. QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
  234. 0,
  235. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
  236. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
  237. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
  238. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
  239. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
  240. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
  241. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
  242. QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
  243. QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
  244. QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
  245. 0,
  246. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
  247. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
  248. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
  249. QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
  250. 0,
  251. QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
  252. QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
  253. QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
  254. 0,
  255. QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
  256. 0,
  257. };
  258. /* PCI Windowing for DDR regions. */
  259. #define QLCNIC_PCIE_SEM_TIMEOUT 10000
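/*
 * qlcnic_pcie_sem_lock()/qlcnic_pcie_sem_unlock() acquire and release one of
 * the hardware PCIe semaphores used to arbitrate shared resources between
 * PCI functions.  When a non-zero id_reg is supplied, the holder's port
 * number is written there so a stuck lock can be reported.  Illustrative
 * usage sketch (not taken from a caller in this file):
 *
 *	if (qlcnic_pcie_sem_lock(adapter, sem, id_reg))
 *		return -EIO;
 *	... access the resource protected by semaphore 'sem' ...
 *	qlcnic_pcie_sem_unlock(adapter, sem);
 */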
  260. int
  261. qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
  262. {
  263. int done = 0, timeout = 0;
  264. while (!done) {
  265. done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
  266. if (done == 1)
  267. break;
  268. if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
  269. dev_err(&adapter->pdev->dev,
  270. "Failed to acquire sem=%d lock; holdby=%d\n",
  271. sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
  272. return -EIO;
  273. }
  274. msleep(1);
  275. }
  276. if (id_reg)
  277. QLCWR32(adapter, id_reg, adapter->portnum);
  278. return 0;
  279. }
  280. void
  281. qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
  282. {
  283. QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
  284. }
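/*
 * qlcnic_send_cmd_descs() copies nr_desc pre-built control descriptors onto
 * the Tx ring under the netdev Tx lock and advances the producer index, so
 * firmware requests share the ordinary transmit path.  Returns -EIO if the
 * firmware is not attached and -EBUSY if the ring has no room.
 */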
  285. static int
  286. qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
  287. struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
  288. {
  289. u32 i, producer, consumer;
  290. struct qlcnic_cmd_buffer *pbuf;
  291. struct cmd_desc_type0 *cmd_desc;
  292. struct qlcnic_host_tx_ring *tx_ring;
  293. i = 0;
  294. if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
  295. return -EIO;
  296. tx_ring = adapter->tx_ring;
  297. __netif_tx_lock_bh(tx_ring->txq);
  298. producer = tx_ring->producer;
  299. consumer = tx_ring->sw_consumer;
  300. if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
  301. netif_tx_stop_queue(tx_ring->txq);
  302. smp_mb();
  303. if (qlcnic_tx_avail(tx_ring) > nr_desc) {
  304. if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
  305. netif_tx_wake_queue(tx_ring->txq);
  306. } else {
  307. adapter->stats.xmit_off++;
  308. __netif_tx_unlock_bh(tx_ring->txq);
  309. return -EBUSY;
  310. }
  311. }
  312. do {
  313. cmd_desc = &cmd_desc_arr[i];
  314. pbuf = &tx_ring->cmd_buf_arr[producer];
  315. pbuf->skb = NULL;
  316. pbuf->frag_count = 0;
  317. memcpy(&tx_ring->desc_head[producer],
  318. &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
  319. producer = get_next_index(producer, tx_ring->num_desc);
  320. i++;
  321. } while (i != nr_desc);
  322. tx_ring->producer = producer;
  323. qlcnic_update_cmd_producer(adapter, tx_ring);
  324. __netif_tx_unlock_bh(tx_ring->txq);
  325. return 0;
  326. }
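/*
 * qlcnic_sre_macaddr_change() builds a QLCNIC_MAC_EVENT request carrying a
 * MAC address and VLAN id and posts it through qlcnic_send_cmd_descs().
 * 'op' is one of the QLCNIC_MAC_* opcodes used in this file (for example
 * QLCNIC_MAC_ADD, QLCNIC_MAC_DEL or QLCNIC_MAC_VLAN_DEL).
 */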
  327. static int
  328. qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
  329. __le16 vlan_id, unsigned op)
  330. {
  331. struct qlcnic_nic_req req;
  332. struct qlcnic_mac_req *mac_req;
  333. struct qlcnic_vlan_req *vlan_req;
  334. u64 word;
  335. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  336. req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
  337. word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
  338. req.req_hdr = cpu_to_le64(word);
  339. mac_req = (struct qlcnic_mac_req *)&req.words[0];
  340. mac_req->op = op;
  341. memcpy(mac_req->mac_addr, addr, 6);
  342. vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
  343. vlan_req->vlan_id = vlan_id;
  344. return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  345. }
  346. static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
  347. {
  348. struct list_head *head;
  349. struct qlcnic_mac_list_s *cur;
  350. /* look up if already exists */
  351. list_for_each(head, &adapter->mac_list) {
  352. cur = list_entry(head, struct qlcnic_mac_list_s, list);
  353. if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
  354. return 0;
  355. }
  356. cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
  357. if (cur == NULL) {
  358. dev_err(&adapter->netdev->dev,
  359. "failed to add mac address filter\n");
  360. return -ENOMEM;
  361. }
  362. memcpy(cur->mac_addr, addr, ETH_ALEN);
  363. if (qlcnic_sre_macaddr_change(adapter,
  364. cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
  365. kfree(cur);
  366. return -EIO;
  367. }
  368. list_add_tail(&cur->list, &adapter->mac_list);
  369. return 0;
  370. }
  371. void qlcnic_set_multi(struct net_device *netdev)
  372. {
  373. struct qlcnic_adapter *adapter = netdev_priv(netdev);
  374. struct netdev_hw_addr *ha;
  375. static const u8 bcast_addr[ETH_ALEN] = {
  376. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
  377. };
  378. u32 mode = VPORT_MISS_MODE_DROP;
  379. if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
  380. return;
  381. qlcnic_nic_add_mac(adapter, adapter->mac_addr);
  382. qlcnic_nic_add_mac(adapter, bcast_addr);
  383. if (netdev->flags & IFF_PROMISC) {
  384. if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
  385. mode = VPORT_MISS_MODE_ACCEPT_ALL;
  386. goto send_fw_cmd;
  387. }
  388. if ((netdev->flags & IFF_ALLMULTI) ||
  389. (netdev_mc_count(netdev) > adapter->max_mc_count)) {
  390. mode = VPORT_MISS_MODE_ACCEPT_MULTI;
  391. goto send_fw_cmd;
  392. }
  393. if (!netdev_mc_empty(netdev)) {
  394. netdev_for_each_mc_addr(ha, netdev) {
  395. qlcnic_nic_add_mac(adapter, ha->addr);
  396. }
  397. }
  398. send_fw_cmd:
  399. if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
  400. qlcnic_alloc_lb_filters_mem(adapter);
  401. adapter->mac_learn = 1;
  402. } else {
  403. adapter->mac_learn = 0;
  404. }
  405. qlcnic_nic_set_promisc(adapter, mode);
  406. }
  407. int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
  408. {
  409. struct qlcnic_nic_req req;
  410. u64 word;
  411. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  412. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  413. word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
  414. ((u64)adapter->portnum << 16);
  415. req.req_hdr = cpu_to_le64(word);
  416. req.words[0] = cpu_to_le64(mode);
  417. return qlcnic_send_cmd_descs(adapter,
  418. (struct cmd_desc_type0 *)&req, 1);
  419. }
  420. void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
  421. {
  422. struct qlcnic_mac_list_s *cur;
  423. struct list_head *head = &adapter->mac_list;
  424. while (!list_empty(head)) {
  425. cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
  426. qlcnic_sre_macaddr_change(adapter,
  427. cur->mac_addr, 0, QLCNIC_MAC_DEL);
  428. list_del(&cur->list);
  429. kfree(cur);
  430. }
  431. }
  432. void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
  433. {
  434. struct qlcnic_filter *tmp_fil;
  435. struct hlist_node *tmp_hnode, *n;
  436. struct hlist_head *head;
  437. int i;
  438. for (i = 0; i < adapter->fhash.fmax; i++) {
  439. head = &(adapter->fhash.fhead[i]);
  440. hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
  441. {
  442. if (time_after(jiffies,
  443. tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ)) {
  444. qlcnic_sre_macaddr_change(adapter,
  445. tmp_fil->faddr, tmp_fil->vlan_id,
  446. tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
  447. QLCNIC_MAC_DEL);
  448. spin_lock_bh(&adapter->mac_learn_lock);
  449. adapter->fhash.fnum--;
  450. hlist_del(&tmp_fil->fnode);
  451. spin_unlock_bh(&adapter->mac_learn_lock);
  452. kfree(tmp_fil);
  453. }
  454. }
  455. }
  456. }
  457. void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
  458. {
  459. struct qlcnic_filter *tmp_fil;
  460. struct hlist_node *tmp_hnode, *n;
  461. struct hlist_head *head;
  462. int i;
  463. for (i = 0; i < adapter->fhash.fmax; i++) {
  464. head = &(adapter->fhash.fhead[i]);
  465. hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
  466. qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
  467. tmp_fil->vlan_id, tmp_fil->vlan_id ?
  468. QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
  469. spin_lock_bh(&adapter->mac_learn_lock);
  470. adapter->fhash.fnum--;
  471. hlist_del(&tmp_fil->fnode);
  472. spin_unlock_bh(&adapter->mac_learn_lock);
  473. kfree(tmp_fil);
  474. }
  475. }
  476. }
  477. int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
  478. {
  479. struct qlcnic_nic_req req;
  480. int rv;
  481. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  482. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  483. req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
  484. ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
  485. req.words[0] = cpu_to_le64(flag);
  486. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  487. if (rv != 0)
  488. dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
  489. flag ? "Set" : "Reset");
  490. return rv;
  491. }
  492. int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
  493. {
  494. if (qlcnic_set_fw_loopback(adapter, mode))
  495. return -EIO;
  496. if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
  497. qlcnic_set_fw_loopback(adapter, 0);
  498. return -EIO;
  499. }
  500. msleep(1000);
  501. return 0;
  502. }
  503. void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
  504. {
  505. int mode = VPORT_MISS_MODE_DROP;
  506. struct net_device *netdev = adapter->netdev;
  507. qlcnic_set_fw_loopback(adapter, 0);
  508. if (netdev->flags & IFF_PROMISC)
  509. mode = VPORT_MISS_MODE_ACCEPT_ALL;
  510. else if (netdev->flags & IFF_ALLMULTI)
  511. mode = VPORT_MISS_MODE_ACCEPT_MULTI;
  512. qlcnic_nic_set_promisc(adapter, mode);
  513. msleep(1000);
  514. }
  515. /*
  516. * Send the interrupt coalescing parameter set by ethtool to the card.
  517. */
  518. int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
  519. {
  520. struct qlcnic_nic_req req;
  521. int rv;
  522. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  523. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  524. req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
  525. ((u64) adapter->portnum << 16));
  526. req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
  527. req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
  528. ((u64) adapter->ahw->coal.rx_time_us) << 16);
  529. req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
  530. ((u64) adapter->ahw->coal.type) << 32 |
  531. ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
  532. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  533. if (rv != 0)
  534. dev_err(&adapter->netdev->dev,
  535. "Could not send interrupt coalescing parameters\n");
  536. return rv;
  537. }
  538. int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
  539. {
  540. struct qlcnic_nic_req req;
  541. u64 word;
  542. int rv;
  543. if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
  544. return 0;
  545. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  546. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  547. word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
  548. req.req_hdr = cpu_to_le64(word);
  549. req.words[0] = cpu_to_le64(enable);
  550. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  551. if (rv != 0)
  552. dev_err(&adapter->netdev->dev,
  553. "Could not send configure hw lro request\n");
  554. return rv;
  555. }
  556. int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
  557. {
  558. struct qlcnic_nic_req req;
  559. u64 word;
  560. int rv;
  561. if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
  562. return 0;
  563. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  564. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  565. word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
  566. ((u64)adapter->portnum << 16);
  567. req.req_hdr = cpu_to_le64(word);
  568. req.words[0] = cpu_to_le64(enable);
  569. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  570. if (rv != 0)
  571. dev_err(&adapter->netdev->dev,
  572. "Could not send configure bridge mode request\n");
  573. adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
  574. return rv;
  575. }
  576. #define RSS_HASHTYPE_IP_TCP 0x3
  577. int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
  578. {
  579. struct qlcnic_nic_req req;
  580. u64 word;
  581. int i, rv;
  582. static const u64 key[] = {
  583. 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
  584. 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
  585. 0x255b0ec26d5a56daULL
  586. };
  587. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  588. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  589. word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
  590. req.req_hdr = cpu_to_le64(word);
  591. /*
  592. * RSS request:
  593. * bits 3-0: hash_method
  594. * 5-4: hash_type_ipv4
  595. * 7-6: hash_type_ipv6
  596. * 8: enable
  597. * 9: use indirection table
  598. * 47-10: reserved
  599. * 63-48: indirection table mask
  600. */
  601. word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
  602. ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
  603. ((u64)(enable & 0x1) << 8) |
  604. ((0x7ULL) << 48);
  605. req.words[0] = cpu_to_le64(word);
  606. for (i = 0; i < 5; i++)
  607. req.words[i+1] = cpu_to_le64(key[i]);
  608. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  609. if (rv != 0)
  610. dev_err(&adapter->netdev->dev, "could not configure RSS\n");
  611. return rv;
  612. }
  613. int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
  614. {
  615. struct qlcnic_nic_req req;
  616. struct qlcnic_ipaddr *ipa;
  617. u64 word;
  618. int rv;
  619. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  620. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  621. word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
  622. req.req_hdr = cpu_to_le64(word);
  623. req.words[0] = cpu_to_le64(cmd);
  624. ipa = (struct qlcnic_ipaddr *)&req.words[1];
  625. ipa->ipv4 = ip;
  626. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  627. if (rv != 0)
  628. dev_err(&adapter->netdev->dev,
  629. "could not notify %s IP 0x%x reuqest\n",
  630. (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
  631. return rv;
  632. }
  633. int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
  634. {
  635. struct qlcnic_nic_req req;
  636. u64 word;
  637. int rv;
  638. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  639. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  640. word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
  641. req.req_hdr = cpu_to_le64(word);
  642. req.words[0] = cpu_to_le64(enable | (enable << 8));
  643. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  644. if (rv != 0)
  645. dev_err(&adapter->netdev->dev,
  646. "could not configure link notification\n");
  647. return rv;
  648. }
  649. int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
  650. {
  651. struct qlcnic_nic_req req;
  652. u64 word;
  653. int rv;
  654. if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
  655. return 0;
  656. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  657. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  658. word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
  659. ((u64)adapter->portnum << 16) |
  660. ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
  661. req.req_hdr = cpu_to_le64(word);
  662. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  663. if (rv != 0)
  664. dev_err(&adapter->netdev->dev,
  665. "could not cleanup lro flows\n");
  666. return rv;
  667. }
  668. /*
  669. * qlcnic_change_mtu - Change the Maximum Transfer Unit
  670. * @returns 0 on success, negative on failure
  671. */
  672. int qlcnic_change_mtu(struct net_device *netdev, int mtu)
  673. {
  674. struct qlcnic_adapter *adapter = netdev_priv(netdev);
  675. int rc = 0;
  676. if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
  677. dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
  678. " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
  679. return -EINVAL;
  680. }
  681. rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
  682. if (!rc)
  683. netdev->mtu = mtu;
  684. return rc;
  685. }
  686. netdev_features_t qlcnic_fix_features(struct net_device *netdev,
  687. netdev_features_t features)
  688. {
  689. struct qlcnic_adapter *adapter = netdev_priv(netdev);
  690. if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
  691. netdev_features_t changed = features ^ netdev->features;
  692. features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
  693. }
  694. if (!(features & NETIF_F_RXCSUM))
  695. features &= ~NETIF_F_LRO;
  696. return features;
  697. }
  698. int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
  699. {
  700. struct qlcnic_adapter *adapter = netdev_priv(netdev);
  701. netdev_features_t changed = netdev->features ^ features;
  702. int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
  703. if (!(changed & NETIF_F_LRO))
  704. return 0;
  705. netdev->features = features ^ NETIF_F_LRO;
  706. if (qlcnic_config_hw_lro(adapter, hw_lro))
  707. return -EIO;
  708. if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
  709. return -EIO;
  710. return 0;
  711. }
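/*
 * CRB register access on the 2MB BAR: qlcnic_pci_get_crb_addr_2M() first
 * tries a direct mapping via crb_128M_2M_map[]; when no direct mapping
 * exists, the access goes through the indirect window at CRB_WINDOW_2M,
 * which must be reprogrammed under crb_lock and crb_win_lock.  Sketch of
 * the indirect path (see qlcnic_hw_read_wx_2M() below for the real code):
 *
 *	write_lock_irqsave(&adapter->ahw->crb_lock, flags);
 *	crb_win_lock(adapter);
 *	if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
 *		data = readl(addr);
 *	crb_win_unlock(adapter);
 *	write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
 */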
  712. /*
  713. * Changes the CRB window to the specified window.
  714. */
  715. /* Returns < 0 if off is not valid,
  716. * 1 if window access is needed. 'off' is set to offset from
  717. * CRB space in 128M pci map
  718. * 0 if no window access is needed. 'off' is set to 2M addr
  719. * In: 'off' is offset from base in 128M pci map
  720. */
  721. static int
  722. qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
  723. ulong off, void __iomem **addr)
  724. {
  725. const struct crb_128M_2M_sub_block_map *m;
  726. if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
  727. return -EINVAL;
  728. off -= QLCNIC_PCI_CRBSPACE;
  729. /*
  730. * Try direct map
  731. */
  732. m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
  733. if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
  734. *addr = adapter->ahw->pci_base0 + m->start_2M +
  735. (off - m->start_128M);
  736. return 0;
  737. }
  738. /*
  739. * Not in direct map, use crb window
  740. */
  741. *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
  742. return 1;
  743. }
  744. /*
  745. * In: 'off' is offset from CRB space in 128M pci map
  746. * Out: 'off' is 2M pci map addr
  747. * side effect: lock crb window
  748. */
  749. static int
  750. qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
  751. {
  752. u32 window;
  753. void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
  754. off -= QLCNIC_PCI_CRBSPACE;
  755. window = CRB_HI(off);
  756. if (window == 0) {
  757. dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
  758. return -EIO;
  759. }
  760. writel(window, addr);
  761. if (readl(addr) != window) {
  762. if (printk_ratelimit())
  763. dev_warn(&adapter->pdev->dev,
  764. "failed to set CRB window to %d off 0x%lx\n",
  765. window, off);
  766. return -EIO;
  767. }
  768. return 0;
  769. }
  770. int
  771. qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
  772. {
  773. unsigned long flags;
  774. int rv;
  775. void __iomem *addr = NULL;
  776. rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
  777. if (rv == 0) {
  778. writel(data, addr);
  779. return 0;
  780. }
  781. if (rv > 0) {
  782. /* indirect access */
  783. write_lock_irqsave(&adapter->ahw->crb_lock, flags);
  784. crb_win_lock(adapter);
  785. rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
  786. if (!rv)
  787. writel(data, addr);
  788. crb_win_unlock(adapter);
  789. write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
  790. return rv;
  791. }
  792. dev_err(&adapter->pdev->dev,
  793. "%s: invalid offset: 0x%016lx\n", __func__, off);
  794. dump_stack();
  795. return -EIO;
  796. }
  797. u32
  798. qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
  799. {
  800. unsigned long flags;
  801. int rv;
  802. u32 data = -1;
  803. void __iomem *addr = NULL;
  804. rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
  805. if (rv == 0)
  806. return readl(addr);
  807. if (rv > 0) {
  808. /* indirect access */
  809. write_lock_irqsave(&adapter->ahw->crb_lock, flags);
  810. crb_win_lock(adapter);
  811. if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
  812. data = readl(addr);
  813. crb_win_unlock(adapter);
  814. write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
  815. return data;
  816. }
  817. dev_err(&adapter->pdev->dev,
  818. "%s: invalid offset: 0x%016lx\n", __func__, off);
  819. dump_stack();
  820. return -1;
  821. }
  822. void __iomem *
  823. qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
  824. {
  825. void __iomem *addr = NULL;
  826. WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
  827. return addr;
  828. }
  829. static int
  830. qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
  831. u64 addr, u32 *start)
  832. {
  833. u32 window;
  834. window = OCM_WIN_P3P(addr);
  835. writel(window, adapter->ahw->ocm_win_crb);
  836. /* read back to flush */
  837. readl(adapter->ahw->ocm_win_crb);
  838. *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
  839. return 0;
  840. }
  841. static int
  842. qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
  843. u64 *data, int op)
  844. {
  845. void __iomem *addr;
  846. int ret;
  847. u32 start;
  848. mutex_lock(&adapter->ahw->mem_lock);
  849. ret = qlcnic_pci_set_window_2M(adapter, off, &start);
  850. if (ret != 0)
  851. goto unlock;
  852. addr = adapter->ahw->pci_base0 + start;
  853. if (op == 0) /* read */
  854. *data = readq(addr);
  855. else /* write */
  856. writeq(*data, addr);
  857. unlock:
  858. mutex_unlock(&adapter->ahw->mem_lock);
  859. return ret;
  860. }
  861. void
  862. qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
  863. {
  864. void __iomem *addr = adapter->ahw->pci_base0 +
  865. QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
  866. mutex_lock(&adapter->ahw->mem_lock);
  867. *data = readq(addr);
  868. mutex_unlock(&adapter->ahw->mem_lock);
  869. }
  870. void
  871. qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
  872. {
  873. void __iomem *addr = adapter->ahw->pci_base0 +
  874. QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
  875. mutex_lock(&adapter->ahw->mem_lock);
  876. writeq(data, addr);
  877. mutex_unlock(&adapter->ahw->mem_lock);
  878. }
  879. #define MAX_CTL_CHECK 1000
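/*
 * qlcnic_pci_mem_write_2M()/qlcnic_pci_mem_read_2M() access adapter memory
 * (DDR/QDR) through the MIU test agent, which operates on 16-byte lines.
 * A 64-bit write is therefore a read-modify-write: the line is read back
 * first, the untouched half is copied from the RDDATA registers into the
 * WRDATA registers, the new qword is written into the other half, and the
 * line is written out again.  OCM addresses bypass the agent and use
 * qlcnic_pci_mem_access_direct() instead.
 */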
  880. int
  881. qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
  882. u64 off, u64 data)
  883. {
  884. int i, j, ret;
  885. u32 temp, off8;
  886. void __iomem *mem_crb;
  887. /* Only 64-bit aligned access */
  888. if (off & 7)
  889. return -EIO;
  890. /* P3 onward, test agent base for MIU and SIU is same */
  891. if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
  892. QLCNIC_ADDR_QDR_NET_MAX)) {
  893. mem_crb = qlcnic_get_ioaddr(adapter,
  894. QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
  895. goto correct;
  896. }
  897. if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
  898. mem_crb = qlcnic_get_ioaddr(adapter,
  899. QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
  900. goto correct;
  901. }
  902. if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
  903. return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
  904. return -EIO;
  905. correct:
  906. off8 = off & ~0xf;
  907. mutex_lock(&adapter->ahw->mem_lock);
  908. writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
  909. writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
  910. i = 0;
  911. writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
  912. writel((TA_CTL_START | TA_CTL_ENABLE),
  913. (mem_crb + TEST_AGT_CTRL));
  914. for (j = 0; j < MAX_CTL_CHECK; j++) {
  915. temp = readl(mem_crb + TEST_AGT_CTRL);
  916. if ((temp & TA_CTL_BUSY) == 0)
  917. break;
  918. }
  919. if (j >= MAX_CTL_CHECK) {
  920. ret = -EIO;
  921. goto done;
  922. }
  923. i = (off & 0xf) ? 0 : 2;
  924. writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
  925. mem_crb + MIU_TEST_AGT_WRDATA(i));
  926. writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
  927. mem_crb + MIU_TEST_AGT_WRDATA(i+1));
  928. i = (off & 0xf) ? 2 : 0;
  929. writel(data & 0xffffffff,
  930. mem_crb + MIU_TEST_AGT_WRDATA(i));
  931. writel((data >> 32) & 0xffffffff,
  932. mem_crb + MIU_TEST_AGT_WRDATA(i+1));
  933. writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
  934. writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
  935. (mem_crb + TEST_AGT_CTRL));
  936. for (j = 0; j < MAX_CTL_CHECK; j++) {
  937. temp = readl(mem_crb + TEST_AGT_CTRL);
  938. if ((temp & TA_CTL_BUSY) == 0)
  939. break;
  940. }
  941. if (j >= MAX_CTL_CHECK) {
  942. if (printk_ratelimit())
  943. dev_err(&adapter->pdev->dev,
  944. "failed to write through agent\n");
  945. ret = -EIO;
  946. } else
  947. ret = 0;
  948. done:
  949. mutex_unlock(&adapter->ahw->mem_lock);
  950. return ret;
  951. }
  952. int
  953. qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
  954. u64 off, u64 *data)
  955. {
  956. int j, ret;
  957. u32 temp, off8;
  958. u64 val;
  959. void __iomem *mem_crb;
  960. /* Only 64-bit aligned access */
  961. if (off & 7)
  962. return -EIO;
  963. /* P3 onward, test agent base for MIU and SIU is same */
  964. if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
  965. QLCNIC_ADDR_QDR_NET_MAX)) {
  966. mem_crb = qlcnic_get_ioaddr(adapter,
  967. QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
  968. goto correct;
  969. }
  970. if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
  971. mem_crb = qlcnic_get_ioaddr(adapter,
  972. QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
  973. goto correct;
  974. }
  975. if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
  976. return qlcnic_pci_mem_access_direct(adapter,
  977. off, data, 0);
  978. }
  979. return -EIO;
  980. correct:
  981. off8 = off & ~0xf;
  982. mutex_lock(&adapter->ahw->mem_lock);
  983. writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
  984. writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
  985. writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
  986. writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
  987. for (j = 0; j < MAX_CTL_CHECK; j++) {
  988. temp = readl(mem_crb + TEST_AGT_CTRL);
  989. if ((temp & TA_CTL_BUSY) == 0)
  990. break;
  991. }
  992. if (j >= MAX_CTL_CHECK) {
  993. if (printk_ratelimit())
  994. dev_err(&adapter->pdev->dev,
  995. "failed to read through agent\n");
  996. ret = -EIO;
  997. } else {
  998. off8 = MIU_TEST_AGT_RDDATA_LO;
  999. if (off & 0xf)
  1000. off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
  1001. temp = readl(mem_crb + off8 + 4);
  1002. val = (u64)temp << 32;
  1003. val |= readl(mem_crb + off8);
  1004. *data = val;
  1005. ret = 0;
  1006. }
  1007. mutex_unlock(&adapter->ahw->mem_lock);
  1008. return ret;
  1009. }
  1010. int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
  1011. {
  1012. int offset, board_type, magic;
  1013. struct pci_dev *pdev = adapter->pdev;
  1014. offset = QLCNIC_FW_MAGIC_OFFSET;
  1015. if (qlcnic_rom_fast_read(adapter, offset, &magic))
  1016. return -EIO;
  1017. if (magic != QLCNIC_BDINFO_MAGIC) {
  1018. dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
  1019. magic);
  1020. return -EIO;
  1021. }
  1022. offset = QLCNIC_BRDTYPE_OFFSET;
  1023. if (qlcnic_rom_fast_read(adapter, offset, &board_type))
  1024. return -EIO;
  1025. adapter->ahw->board_type = board_type;
  1026. if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
  1027. u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
  1028. if ((gpio & 0x8000) == 0)
  1029. board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
  1030. }
  1031. switch (board_type) {
  1032. case QLCNIC_BRDTYPE_P3P_HMEZ:
  1033. case QLCNIC_BRDTYPE_P3P_XG_LOM:
  1034. case QLCNIC_BRDTYPE_P3P_10G_CX4:
  1035. case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
  1036. case QLCNIC_BRDTYPE_P3P_IMEZ:
  1037. case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
  1038. case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
  1039. case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
  1040. case QLCNIC_BRDTYPE_P3P_10G_XFP:
  1041. case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
  1042. adapter->ahw->port_type = QLCNIC_XGBE;
  1043. break;
  1044. case QLCNIC_BRDTYPE_P3P_REF_QG:
  1045. case QLCNIC_BRDTYPE_P3P_4_GB:
  1046. case QLCNIC_BRDTYPE_P3P_4_GB_MM:
  1047. adapter->ahw->port_type = QLCNIC_GBE;
  1048. break;
  1049. case QLCNIC_BRDTYPE_P3P_10G_TP:
  1050. adapter->ahw->port_type = (adapter->portnum < 2) ?
  1051. QLCNIC_XGBE : QLCNIC_GBE;
  1052. break;
  1053. default:
  1054. dev_err(&pdev->dev, "unknown board type %x\n", board_type);
  1055. adapter->ahw->port_type = QLCNIC_XGBE;
  1056. break;
  1057. }
  1058. return 0;
  1059. }
  1060. int
  1061. qlcnic_wol_supported(struct qlcnic_adapter *adapter)
  1062. {
  1063. u32 wol_cfg;
  1064. wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
  1065. if (wol_cfg & (1UL << adapter->portnum)) {
  1066. wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
  1067. if (wol_cfg & (1 << adapter->portnum))
  1068. return 1;
  1069. }
  1070. return 0;
  1071. }
  1072. int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
  1073. {
  1074. struct qlcnic_nic_req req;
  1075. int rv;
  1076. u64 word;
  1077. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  1078. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  1079. word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
  1080. req.req_hdr = cpu_to_le64(word);
  1081. req.words[0] = cpu_to_le64((u64)rate << 32);
  1082. req.words[1] = cpu_to_le64(state);
  1083. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  1084. if (rv)
  1085. dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
  1086. return rv;
  1087. }
  1088. /* FW dump related functions */
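/*
 * The firmware minidump is driven by a template (qlcnic_dump_template_hdr)
 * supplied by the firmware.  Each template entry names a capture type, and
 * qlcnic_dump_fw() dispatches it through the fw_dump_ops[] table defined
 * later in this file; every handler below reads one kind of hardware state
 * (CRB registers, muxed registers, queues, OCM, caches, flash, memory) into
 * the dump buffer and returns the number of bytes it captured (control-only
 * and NOP entries return 0).
 */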
  1089. static u32
  1090. qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1091. u32 *buffer)
  1092. {
  1093. int i;
  1094. u32 addr, data;
  1095. struct __crb *crb = &entry->region.crb;
  1096. void __iomem *base = adapter->ahw->pci_base0;
  1097. addr = crb->addr;
  1098. for (i = 0; i < crb->no_ops; i++) {
  1099. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1100. *buffer++ = cpu_to_le32(addr);
  1101. *buffer++ = cpu_to_le32(data);
  1102. addr += crb->stride;
  1103. }
  1104. return crb->no_ops * 2 * sizeof(u32);
  1105. }
  1106. static u32
  1107. qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
  1108. struct qlcnic_dump_entry *entry, u32 *buffer)
  1109. {
  1110. int i, k, timeout = 0;
  1111. void __iomem *base = adapter->ahw->pci_base0;
  1112. u32 addr, data;
  1113. u8 opcode, no_ops;
  1114. struct __ctrl *ctr = &entry->region.ctrl;
  1115. struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
  1116. addr = ctr->addr;
  1117. no_ops = ctr->no_ops;
  1118. for (i = 0; i < no_ops; i++) {
  1119. k = 0;
  1120. opcode = 0;
  1121. for (k = 0; k < 8; k++) {
  1122. if (!(ctr->opcode & (1 << k)))
  1123. continue;
  1124. switch (1 << k) {
  1125. case QLCNIC_DUMP_WCRB:
  1126. QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
  1127. break;
  1128. case QLCNIC_DUMP_RWCRB:
  1129. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1130. QLCNIC_WR_DUMP_REG(addr, base, data);
  1131. break;
  1132. case QLCNIC_DUMP_ANDCRB:
  1133. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1134. QLCNIC_WR_DUMP_REG(addr, base,
  1135. (data & ctr->val2));
  1136. break;
  1137. case QLCNIC_DUMP_ORCRB:
  1138. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1139. QLCNIC_WR_DUMP_REG(addr, base,
  1140. (data | ctr->val3));
  1141. break;
  1142. case QLCNIC_DUMP_POLLCRB:
  1143. while (timeout <= ctr->timeout) {
  1144. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1145. if ((data & ctr->val2) == ctr->val1)
  1146. break;
  1147. msleep(1);
  1148. timeout++;
  1149. }
  1150. if (timeout > ctr->timeout) {
  1151. dev_info(&adapter->pdev->dev,
  1152. "Timed out, aborting poll CRB\n");
  1153. return -EINVAL;
  1154. }
  1155. break;
  1156. case QLCNIC_DUMP_RD_SAVE:
  1157. if (ctr->index_a)
  1158. addr = t_hdr->saved_state[ctr->index_a];
  1159. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1160. t_hdr->saved_state[ctr->index_v] = data;
  1161. break;
  1162. case QLCNIC_DUMP_WRT_SAVED:
  1163. if (ctr->index_v)
  1164. data = t_hdr->saved_state[ctr->index_v];
  1165. else
  1166. data = ctr->val1;
  1167. if (ctr->index_a)
  1168. addr = t_hdr->saved_state[ctr->index_a];
  1169. QLCNIC_WR_DUMP_REG(addr, base, data);
  1170. break;
  1171. case QLCNIC_DUMP_MOD_SAVE_ST:
  1172. data = t_hdr->saved_state[ctr->index_v];
  1173. data <<= ctr->shl_val;
  1174. data >>= ctr->shr_val;
  1175. if (ctr->val2)
  1176. data &= ctr->val2;
  1177. data |= ctr->val3;
  1178. data += ctr->val1;
  1179. t_hdr->saved_state[ctr->index_v] = data;
  1180. break;
  1181. default:
  1182. dev_info(&adapter->pdev->dev,
  1183. "Unknown opcode\n");
  1184. break;
  1185. }
  1186. }
  1187. addr += ctr->stride;
  1188. }
  1189. return 0;
  1190. }
  1191. static u32
  1192. qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1193. u32 *buffer)
  1194. {
  1195. int loop;
  1196. u32 val, data = 0;
  1197. struct __mux *mux = &entry->region.mux;
  1198. void __iomem *base = adapter->ahw->pci_base0;
  1199. val = mux->val;
  1200. for (loop = 0; loop < mux->no_ops; loop++) {
  1201. QLCNIC_WR_DUMP_REG(mux->addr, base, val);
  1202. QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
  1203. *buffer++ = cpu_to_le32(val);
  1204. *buffer++ = cpu_to_le32(data);
  1205. val += mux->val_stride;
  1206. }
  1207. return 2 * mux->no_ops * sizeof(u32);
  1208. }
  1209. static u32
  1210. qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1211. u32 *buffer)
  1212. {
  1213. int i, loop;
  1214. u32 cnt, addr, data, que_id = 0;
  1215. void __iomem *base = adapter->ahw->pci_base0;
  1216. struct __queue *que = &entry->region.que;
  1217. addr = que->read_addr;
  1218. cnt = que->read_addr_cnt;
  1219. for (loop = 0; loop < que->no_ops; loop++) {
  1220. QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
  1221. addr = que->read_addr;
  1222. for (i = 0; i < cnt; i++) {
  1223. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1224. *buffer++ = cpu_to_le32(data);
  1225. addr += que->read_addr_stride;
  1226. }
  1227. que_id += que->stride;
  1228. }
  1229. return que->no_ops * cnt * sizeof(u32);
  1230. }
  1231. static u32
  1232. qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1233. u32 *buffer)
  1234. {
  1235. int i;
  1236. u32 data;
  1237. void __iomem *addr;
  1238. struct __ocm *ocm = &entry->region.ocm;
  1239. addr = adapter->ahw->pci_base0 + ocm->read_addr;
  1240. for (i = 0; i < ocm->no_ops; i++) {
  1241. data = readl(addr);
  1242. *buffer++ = cpu_to_le32(data);
  1243. addr += ocm->read_addr_stride;
  1244. }
  1245. return ocm->no_ops * sizeof(u32);
  1246. }
  1247. static u32
  1248. qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1249. u32 *buffer)
  1250. {
  1251. int i, count = 0;
  1252. u32 fl_addr, size, val, lck_val, addr;
  1253. struct __mem *rom = &entry->region.mem;
  1254. void __iomem *base = adapter->ahw->pci_base0;
  1255. fl_addr = rom->addr;
  1256. size = rom->size/4;
  1257. lock_try:
  1258. lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
  1259. if (!lck_val && count < MAX_CTL_CHECK) {
  1260. msleep(10);
  1261. count++;
  1262. goto lock_try;
  1263. }
  1264. writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
  1265. for (i = 0; i < size; i++) {
  1266. addr = fl_addr & 0xFFFF0000;
  1267. QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
  1268. addr = LSW(fl_addr) + FLASH_ROM_DATA;
  1269. QLCNIC_RD_DUMP_REG(addr, base, &val);
  1270. fl_addr += 4;
  1271. *buffer++ = cpu_to_le32(val);
  1272. }
  1273. readl(base + QLCNIC_FLASH_SEM2_ULK);
  1274. return rom->size;
  1275. }
  1276. static u32
  1277. qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  1278. struct qlcnic_dump_entry *entry, u32 *buffer)
  1279. {
  1280. int i;
  1281. u32 cnt, val, data, addr;
  1282. void __iomem *base = adapter->ahw->pci_base0;
  1283. struct __cache *l1 = &entry->region.cache;
  1284. val = l1->init_tag_val;
  1285. for (i = 0; i < l1->no_ops; i++) {
  1286. QLCNIC_WR_DUMP_REG(l1->addr, base, val);
  1287. QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
  1288. addr = l1->read_addr;
  1289. cnt = l1->read_addr_num;
  1290. while (cnt) {
  1291. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1292. *buffer++ = cpu_to_le32(data);
  1293. addr += l1->read_addr_stride;
  1294. cnt--;
  1295. }
  1296. val += l1->stride;
  1297. }
  1298. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  1299. }
  1300. static u32
  1301. qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
  1302. struct qlcnic_dump_entry *entry, u32 *buffer)
  1303. {
  1304. int i;
  1305. u32 cnt, val, data, addr;
  1306. u8 poll_mask, poll_to, time_out = 0;
  1307. void __iomem *base = adapter->ahw->pci_base0;
  1308. struct __cache *l2 = &entry->region.cache;
  1309. val = l2->init_tag_val;
  1310. poll_mask = LSB(MSW(l2->ctrl_val));
  1311. poll_to = MSB(MSW(l2->ctrl_val));
  1312. for (i = 0; i < l2->no_ops; i++) {
  1313. QLCNIC_WR_DUMP_REG(l2->addr, base, val);
  1314. if (LSW(l2->ctrl_val))
  1315. QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
  1316. LSW(l2->ctrl_val));
  1317. if (!poll_mask)
  1318. goto skip_poll;
  1319. do {
  1320. QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
  1321. if (!(data & poll_mask))
  1322. break;
  1323. msleep(1);
  1324. time_out++;
  1325. } while (time_out <= poll_to);
  1326. if (time_out > poll_to) {
  1327. dev_err(&adapter->pdev->dev,
  1328. "Timeout exceeded in %s, aborting dump\n",
  1329. __func__);
  1330. return -EINVAL;
  1331. }
  1332. skip_poll:
  1333. addr = l2->read_addr;
  1334. cnt = l2->read_addr_num;
  1335. while (cnt) {
  1336. QLCNIC_RD_DUMP_REG(addr, base, &data);
  1337. *buffer++ = cpu_to_le32(data);
  1338. addr += l2->read_addr_stride;
  1339. cnt--;
  1340. }
  1341. val += l2->stride;
  1342. }
  1343. return l2->no_ops * l2->read_addr_num * sizeof(u32);
  1344. }
  1345. static u32
  1346. qlcnic_read_memory(struct qlcnic_adapter *adapter,
  1347. struct qlcnic_dump_entry *entry, u32 *buffer)
  1348. {
  1349. u32 addr, data, test, ret = 0;
  1350. int i, reg_read;
  1351. struct __mem *mem = &entry->region.mem;
  1352. void __iomem *base = adapter->ahw->pci_base0;
  1353. reg_read = mem->size;
  1354. addr = mem->addr;
  1355. /* check for data size of multiple of 16 and 16 byte alignment */
  1356. if ((addr & 0xf) || (reg_read%16)) {
  1357. dev_info(&adapter->pdev->dev,
  1358. "Unaligned memory addr:0x%x size:0x%x\n",
  1359. addr, reg_read);
  1360. return -EINVAL;
  1361. }
  1362. mutex_lock(&adapter->ahw->mem_lock);
  1363. while (reg_read != 0) {
  1364. QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
  1365. QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
  1366. QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
  1367. TA_CTL_ENABLE | TA_CTL_START);
  1368. for (i = 0; i < MAX_CTL_CHECK; i++) {
  1369. QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
  1370. if (!(test & TA_CTL_BUSY))
  1371. break;
  1372. }
  1373. if (i == MAX_CTL_CHECK) {
  1374. /* fail the read even when the log message is ratelimited */
  1375. if (printk_ratelimit())
  1376. dev_err(&adapter->pdev->dev,
  1377. "failed to read through agent\n");
  1378. ret = -EINVAL;
  1379. goto out;
  1380. }
  1381. for (i = 0; i < 4; i++) {
  1382. QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
  1383. *buffer++ = cpu_to_le32(data);
  1384. }
  1385. addr += 16;
  1386. reg_read -= 16;
  1387. ret += 16;
  1388. }
  1389. out:
  1390. mutex_unlock(&adapter->ahw->mem_lock);
  1391. return ret;
  1392. }
  1393. static u32
  1394. qlcnic_dump_nop(struct qlcnic_adapter *adapter,
  1395. struct qlcnic_dump_entry *entry, u32 *buffer)
  1396. {
  1397. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  1398. return 0;
  1399. }
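/*
 * Dispatch table: qlcnic_dump_fw() matches each template entry's hdr.type
 * against .opcode and calls the corresponding capture handler.
 */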
  1400. struct qlcnic_dump_operations fw_dump_ops[] = {
  1401. { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
  1402. { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
  1403. { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
  1404. { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
  1405. { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
  1406. { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
  1407. { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
  1408. { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
  1409. { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
  1410. { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
  1411. { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
  1412. { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
  1413. { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
  1414. { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
  1415. { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
  1416. { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
  1417. { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
  1418. { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
  1419. { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
  1420. { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
  1421. };
  1422. /* Walk the template and collect dump for each entry in the dump template */
  1423. static int
  1424. qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
  1425. u32 size)
  1426. {
  1427. int ret = 1;
  1428. if (size != entry->hdr.cap_size) {
  1429. dev_info(dev,
  1430. "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  1431. entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
  1432. dev_info(dev, "Aborting further dump capture\n");
  1433. ret = 0;
  1434. }
  1435. return ret;
  1436. }
  1437. int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
  1438. {
  1439. u32 *buffer;
  1440. char mesg[64];
  1441. char *msg[] = {mesg, NULL};
  1442. int i, k, ops_cnt, ops_index, dump_size = 0;
  1443. u32 entry_offset, dump, no_entries, buf_offset = 0;
  1444. struct qlcnic_dump_entry *entry;
  1445. struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
  1446. struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
  1447. if (fw_dump->clr) {
  1448. dev_info(&adapter->pdev->dev,
  1449. "Previous dump not cleared, not capturing dump\n");
  1450. return -EIO;
  1451. }
  1452. /* Calculate the size for dump data area only */
  1453. for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
  1454. if (i & tmpl_hdr->drv_cap_mask)
  1455. dump_size += tmpl_hdr->cap_sizes[k];
  1456. if (!dump_size)
  1457. return -EIO;
  1458. fw_dump->data = vzalloc(dump_size);
  1459. if (!fw_dump->data) {
  1460. dev_info(&adapter->pdev->dev,
  1461. "Unable to allocate (%d KB) for fw dump\n",
  1462. dump_size/1024);
  1463. return -ENOMEM;
  1464. }
  1465. buffer = fw_dump->data;
  1466. fw_dump->size = dump_size;
  1467. no_entries = tmpl_hdr->num_entries;
  1468. ops_cnt = ARRAY_SIZE(fw_dump_ops);
  1469. entry_offset = tmpl_hdr->offset;
  1470. tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
  1471. tmpl_hdr->sys_info[1] = adapter->fw_version;
  1472. for (i = 0; i < no_entries; i++) {
  1473. entry = (void *)tmpl_hdr + entry_offset;
  1474. if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
  1475. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  1476. entry_offset += entry->hdr.offset;
  1477. continue;
  1478. }
  1479. /* Find the handler for this entry */
  1480. ops_index = 0;
  1481. while (ops_index < ops_cnt) {
  1482. if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
  1483. break;
  1484. ops_index++;
  1485. }
  1486. if (ops_index == ops_cnt) {
  1487. dev_info(&adapter->pdev->dev,
  1488. "Invalid entry type %d, exiting dump\n",
  1489. entry->hdr.type);
  1490. goto error;
  1491. }
  1492. /* Collect dump for this entry */
  1493. dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
  1494. if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
  1495. dump))
  1496. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  1497. buf_offset += entry->hdr.cap_size;
  1498. entry_offset += entry->hdr.offset;
  1499. buffer = fw_dump->data + buf_offset;
  1500. }
  1501. if (dump_size != buf_offset) {
  1502. dev_info(&adapter->pdev->dev,
  1503. "Captured(%d) and expected size(%d) do not match\n",
  1504. buf_offset, dump_size);
  1505. goto error;
  1506. } else {
  1507. fw_dump->clr = 1;
  1508. snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
  1509. adapter->netdev->name);
  1510. dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
  1511. fw_dump->size);
  1512. /* Send a udev event to notify availability of FW dump */
  1513. kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
  1514. return 0;
  1515. }
  1516. error:
  1517. vfree(fw_dump->data);
  1518. return -EINVAL;
  1519. }