qlcnic_hw.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816
  1. /*
  2. * QLogic qlcnic NIC Driver
  3. * Copyright (c) 2009-2010 QLogic Corporation
  4. *
  5. * See LICENSE.qlcnic for copyright and licensing details.
  6. */
#include "qlcnic.h"

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <net/ip.h>
  11. #define MASK(n) ((1ULL<<(n))-1)
  12. #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
  13. #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
  14. #define CRB_BLK(off) ((off >> 20) & 0x3f)
  15. #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
  16. #define CRB_WINDOW_2M (0x130060)
  17. #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
  18. #define CRB_INDIRECT_2M (0x1e0000UL)
  19. #ifndef readq
  20. static inline u64 readq(void __iomem *addr)
  21. {
  22. return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
  23. }
  24. #endif
  25. #ifndef writeq
  26. static inline void writeq(u64 val, void __iomem *addr)
  27. {
  28. writel(((u32) (val)), (addr));
  29. writel(((u32) (val >> 32)), (addr + 4));
  30. }
  31. #endif
  32. static struct crb_128M_2M_block_map
  33. crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
  34. {{{0, 0, 0, 0} } }, /* 0: PCI */
  35. {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
  36. {1, 0x0110000, 0x0120000, 0x130000},
  37. {1, 0x0120000, 0x0122000, 0x124000},
  38. {1, 0x0130000, 0x0132000, 0x126000},
  39. {1, 0x0140000, 0x0142000, 0x128000},
  40. {1, 0x0150000, 0x0152000, 0x12a000},
  41. {1, 0x0160000, 0x0170000, 0x110000},
  42. {1, 0x0170000, 0x0172000, 0x12e000},
  43. {0, 0x0000000, 0x0000000, 0x000000},
  44. {0, 0x0000000, 0x0000000, 0x000000},
  45. {0, 0x0000000, 0x0000000, 0x000000},
  46. {0, 0x0000000, 0x0000000, 0x000000},
  47. {0, 0x0000000, 0x0000000, 0x000000},
  48. {0, 0x0000000, 0x0000000, 0x000000},
  49. {1, 0x01e0000, 0x01e0800, 0x122000},
  50. {0, 0x0000000, 0x0000000, 0x000000} } },
  51. {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
  52. {{{0, 0, 0, 0} } }, /* 3: */
  53. {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
  54. {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
  55. {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
  56. {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
  57. {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
  58. {0, 0x0000000, 0x0000000, 0x000000},
  59. {0, 0x0000000, 0x0000000, 0x000000},
  60. {0, 0x0000000, 0x0000000, 0x000000},
  61. {0, 0x0000000, 0x0000000, 0x000000},
  62. {0, 0x0000000, 0x0000000, 0x000000},
  63. {0, 0x0000000, 0x0000000, 0x000000},
  64. {0, 0x0000000, 0x0000000, 0x000000},
  65. {0, 0x0000000, 0x0000000, 0x000000},
  66. {0, 0x0000000, 0x0000000, 0x000000},
  67. {0, 0x0000000, 0x0000000, 0x000000},
  68. {0, 0x0000000, 0x0000000, 0x000000},
  69. {0, 0x0000000, 0x0000000, 0x000000},
  70. {0, 0x0000000, 0x0000000, 0x000000},
  71. {0, 0x0000000, 0x0000000, 0x000000},
  72. {1, 0x08f0000, 0x08f2000, 0x172000} } },
  73. {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
  74. {0, 0x0000000, 0x0000000, 0x000000},
  75. {0, 0x0000000, 0x0000000, 0x000000},
  76. {0, 0x0000000, 0x0000000, 0x000000},
  77. {0, 0x0000000, 0x0000000, 0x000000},
  78. {0, 0x0000000, 0x0000000, 0x000000},
  79. {0, 0x0000000, 0x0000000, 0x000000},
  80. {0, 0x0000000, 0x0000000, 0x000000},
  81. {0, 0x0000000, 0x0000000, 0x000000},
  82. {0, 0x0000000, 0x0000000, 0x000000},
  83. {0, 0x0000000, 0x0000000, 0x000000},
  84. {0, 0x0000000, 0x0000000, 0x000000},
  85. {0, 0x0000000, 0x0000000, 0x000000},
  86. {0, 0x0000000, 0x0000000, 0x000000},
  87. {0, 0x0000000, 0x0000000, 0x000000},
  88. {1, 0x09f0000, 0x09f2000, 0x176000} } },
  89. {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
  90. {0, 0x0000000, 0x0000000, 0x000000},
  91. {0, 0x0000000, 0x0000000, 0x000000},
  92. {0, 0x0000000, 0x0000000, 0x000000},
  93. {0, 0x0000000, 0x0000000, 0x000000},
  94. {0, 0x0000000, 0x0000000, 0x000000},
  95. {0, 0x0000000, 0x0000000, 0x000000},
  96. {0, 0x0000000, 0x0000000, 0x000000},
  97. {0, 0x0000000, 0x0000000, 0x000000},
  98. {0, 0x0000000, 0x0000000, 0x000000},
  99. {0, 0x0000000, 0x0000000, 0x000000},
  100. {0, 0x0000000, 0x0000000, 0x000000},
  101. {0, 0x0000000, 0x0000000, 0x000000},
  102. {0, 0x0000000, 0x0000000, 0x000000},
  103. {0, 0x0000000, 0x0000000, 0x000000},
  104. {1, 0x0af0000, 0x0af2000, 0x17a000} } },
  105. {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
  106. {0, 0x0000000, 0x0000000, 0x000000},
  107. {0, 0x0000000, 0x0000000, 0x000000},
  108. {0, 0x0000000, 0x0000000, 0x000000},
  109. {0, 0x0000000, 0x0000000, 0x000000},
  110. {0, 0x0000000, 0x0000000, 0x000000},
  111. {0, 0x0000000, 0x0000000, 0x000000},
  112. {0, 0x0000000, 0x0000000, 0x000000},
  113. {0, 0x0000000, 0x0000000, 0x000000},
  114. {0, 0x0000000, 0x0000000, 0x000000},
  115. {0, 0x0000000, 0x0000000, 0x000000},
  116. {0, 0x0000000, 0x0000000, 0x000000},
  117. {0, 0x0000000, 0x0000000, 0x000000},
  118. {0, 0x0000000, 0x0000000, 0x000000},
  119. {0, 0x0000000, 0x0000000, 0x000000},
  120. {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
  121. {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
  122. {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
  123. {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
  124. {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
  125. {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
  126. {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
  127. {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
  128. {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
  129. {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
  130. {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
  131. {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
  132. {{{0, 0, 0, 0} } }, /* 23: */
  133. {{{0, 0, 0, 0} } }, /* 24: */
  134. {{{0, 0, 0, 0} } }, /* 25: */
  135. {{{0, 0, 0, 0} } }, /* 26: */
  136. {{{0, 0, 0, 0} } }, /* 27: */
  137. {{{0, 0, 0, 0} } }, /* 28: */
  138. {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
  139. {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
  140. {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
  141. {{{0} } }, /* 32: PCI */
  142. {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
  143. {1, 0x2110000, 0x2120000, 0x130000},
  144. {1, 0x2120000, 0x2122000, 0x124000},
  145. {1, 0x2130000, 0x2132000, 0x126000},
  146. {1, 0x2140000, 0x2142000, 0x128000},
  147. {1, 0x2150000, 0x2152000, 0x12a000},
  148. {1, 0x2160000, 0x2170000, 0x110000},
  149. {1, 0x2170000, 0x2172000, 0x12e000},
  150. {0, 0x0000000, 0x0000000, 0x000000},
  151. {0, 0x0000000, 0x0000000, 0x000000},
  152. {0, 0x0000000, 0x0000000, 0x000000},
  153. {0, 0x0000000, 0x0000000, 0x000000},
  154. {0, 0x0000000, 0x0000000, 0x000000},
  155. {0, 0x0000000, 0x0000000, 0x000000},
  156. {0, 0x0000000, 0x0000000, 0x000000},
  157. {0, 0x0000000, 0x0000000, 0x000000} } },
  158. {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
  159. {{{0} } }, /* 35: */
  160. {{{0} } }, /* 36: */
  161. {{{0} } }, /* 37: */
  162. {{{0} } }, /* 38: */
  163. {{{0} } }, /* 39: */
  164. {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
  165. {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
  166. {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
  167. {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
  168. {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
  169. {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
  170. {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
  171. {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
  172. {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
  173. {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
  174. {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
  175. {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
  176. {{{0} } }, /* 52: */
  177. {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
  178. {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
  179. {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
  180. {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
  181. {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
  182. {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
  183. {{{0} } }, /* 59: I2C0 */
  184. {{{0} } }, /* 60: I2C1 */
  185. {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
  186. {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
  187. {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
  188. };
  189. /*
  190. * top 12 bits of crb internal address (hub, agent)
  191. */
  192. static const unsigned crb_hub_agt[64] = {
  193. 0,
  194. QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
  195. QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
  196. QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
  197. 0,
  198. QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
  199. QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
  200. QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
  201. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
  202. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
  203. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
  204. QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
  205. QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
  206. QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
  207. QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
  208. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
  209. QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
  210. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
  211. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
  212. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
  213. QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
  214. QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
  215. QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
  216. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
  217. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
  218. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
  219. QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
  220. 0,
  221. QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
  222. QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
  223. 0,
  224. QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
  225. 0,
  226. QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
  227. QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
  228. 0,
  229. 0,
  230. 0,
  231. 0,
  232. 0,
  233. QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
  234. 0,
  235. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
  236. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
  237. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
  238. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
  239. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
  240. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
  241. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
  242. QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
  243. QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
  244. QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
  245. 0,
  246. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
  247. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
  248. QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
  249. QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
  250. 0,
  251. QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
  252. QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
  253. QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
  254. 0,
  255. QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
  256. 0,
  257. };
  258. static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
  259. {
  260. u32 dest;
  261. void __iomem *window_reg;
  262. dest = addr & 0xFFFF0000;
  263. window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
  264. writel(dest, window_reg);
  265. readl(window_reg);
  266. window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
  267. *data = readl(window_reg);
  268. }
  269. static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
  270. {
  271. u32 dest;
  272. void __iomem *window_reg;
  273. dest = addr & 0xFFFF0000;
  274. window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
  275. writel(dest, window_reg);
  276. readl(window_reg);
  277. window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
  278. writel(data, window_reg);
  279. readl(window_reg);
  280. }
/* PCI Windowing for DDR regions. */

/* Max poll iterations (sleeping 1 ms between attempts) before giving up. */
#define QLCNIC_PCIE_SEM_TIMEOUT 10000

/*
 * Acquire hardware semaphore @sem. Reading the LOCK register is the
 * acquire attempt; a read value of 1 means the semaphore was taken.
 * If @id_reg is non-zero, the current holder id is read from it on
 * failure and our port number is written to it on success.
 *
 * Returns 0 on success, -EIO on timeout.
 *
 * NOTE(review): a non-zero read other than 1 also ends the loop (via the
 * while condition), but only after the timeout bump and 1 ms sleep —
 * preserve this ordering when touching the loop.
 */
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
			dev_err(&adapter->pdev->dev,
				"Failed to acquire sem=%d lock; holdby=%d\n",
				sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
			return -EIO;
		}
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}
/* Release hardware semaphore @sem; reading the UNLOCK register drops it. */
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
/*
 * Copy @nr_desc pre-built command descriptors onto the Tx ring and advance
 * the hardware producer index.
 *
 * Returns 0 on success, -EIO if the firmware is not attached, -EBUSY if
 * the ring cannot hold all the descriptors.
 */
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
	u32 i, producer, consumer;
	struct qlcnic_cmd_buffer *pbuf;
	struct cmd_desc_type0 *cmd_desc;
	struct qlcnic_host_tx_ring *tx_ring;

	i = 0;
	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return -EIO;

	tx_ring = adapter->tx_ring;
	__netif_tx_lock_bh(tx_ring->txq);

	producer = tx_ring->producer;
	/* NOTE(review): consumer and cmd_desc are assigned but never read
	 * below; candidates for removal in a functional cleanup. */
	consumer = tx_ring->sw_consumer;

	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
		/* Ring looks full: stop the queue, then re-check after the
		 * barrier in case the consumer advanced concurrently. */
		netif_tx_stop_queue(tx_ring->txq);
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_tx_wake_queue(tx_ring->txq);
		} else {
			adapter->stats.xmit_off++;
			__netif_tx_unlock_bh(tx_ring->txq);
			return -EBUSY;
		}
	}

	do {
		cmd_desc = &cmd_desc_arr[i];

		/* These slots carry no skb; clear the bookkeeping so the
		 * completion path has nothing to unmap or free. */
		pbuf = &tx_ring->cmd_buf_arr[producer];
		pbuf->skb = NULL;
		pbuf->frag_count = 0;

		memcpy(&tx_ring->desc_head[producer],
			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));

		producer = get_next_index(producer, tx_ring->num_desc);
		i++;
	} while (i != nr_desc);

	tx_ring->producer = producer;

	qlcnic_update_cmd_producer(tx_ring);

	__netif_tx_unlock_bh(tx_ring->txq);
	return 0;
}
  350. static int
  351. qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
  352. __le16 vlan_id, unsigned op)
  353. {
  354. struct qlcnic_nic_req req;
  355. struct qlcnic_mac_req *mac_req;
  356. struct qlcnic_vlan_req *vlan_req;
  357. u64 word;
  358. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  359. req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
  360. word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
  361. req.req_hdr = cpu_to_le64(word);
  362. mac_req = (struct qlcnic_mac_req *)&req.words[0];
  363. mac_req->op = op;
  364. memcpy(mac_req->mac_addr, addr, 6);
  365. vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
  366. vlan_req->vlan_id = vlan_id;
  367. return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  368. }
  369. static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
  370. {
  371. struct list_head *head;
  372. struct qlcnic_mac_list_s *cur;
  373. /* look up if already exists */
  374. list_for_each(head, &adapter->mac_list) {
  375. cur = list_entry(head, struct qlcnic_mac_list_s, list);
  376. if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
  377. return 0;
  378. }
  379. cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
  380. if (cur == NULL) {
  381. dev_err(&adapter->netdev->dev,
  382. "failed to add mac address filter\n");
  383. return -ENOMEM;
  384. }
  385. memcpy(cur->mac_addr, addr, ETH_ALEN);
  386. if (qlcnic_sre_macaddr_change(adapter,
  387. cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
  388. kfree(cur);
  389. return -EIO;
  390. }
  391. list_add_tail(&cur->list, &adapter->mac_list);
  392. return 0;
  393. }
/*
 * Sync the firmware receive filters with the netdev state: always filter
 * the device's own MAC and broadcast, then choose a miss-mode
 * (drop / accept-all / accept-multi) from the interface flags and
 * multicast list, and push it to the firmware.
 */
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
	qlcnic_nic_add_mac(adapter, bcast_addr);

	if (netdev->flags & IFF_PROMISC) {
		/* Honour promiscuous mode only when it is not disabled
		 * for this function. */
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	/* Small multicast list: program each address individually. */
	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(ha, netdev) {
			qlcnic_nic_add_mac(adapter, ha->addr);
		}
	}

send_fw_cmd:
	if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
		/* Accept-all needs the loopback filter table for software
		 * MAC learning. */
		qlcnic_alloc_lb_filters_mem(adapter);
		adapter->mac_learn = 1;
	} else {
		adapter->mac_learn = 0;
	}

	qlcnic_nic_set_promisc(adapter, mode);
}
  430. int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
  431. {
  432. struct qlcnic_nic_req req;
  433. u64 word;
  434. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  435. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  436. word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
  437. ((u64)adapter->portnum << 16);
  438. req.req_hdr = cpu_to_le64(word);
  439. req.words[0] = cpu_to_le64(mode);
  440. return qlcnic_send_cmd_descs(adapter,
  441. (struct cmd_desc_type0 *)&req, 1);
  442. }
  443. void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
  444. {
  445. struct qlcnic_mac_list_s *cur;
  446. struct list_head *head = &adapter->mac_list;
  447. while (!list_empty(head)) {
  448. cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
  449. qlcnic_sre_macaddr_change(adapter,
  450. cur->mac_addr, 0, QLCNIC_MAC_DEL);
  451. list_del(&cur->list);
  452. kfree(cur);
  453. }
  454. }
  455. void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
  456. {
  457. struct qlcnic_filter *tmp_fil;
  458. struct hlist_node *tmp_hnode, *n;
  459. struct hlist_head *head;
  460. int i;
  461. for (i = 0; i < adapter->fhash.fmax; i++) {
  462. head = &(adapter->fhash.fhead[i]);
  463. hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
  464. {
  465. if (jiffies >
  466. (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
  467. qlcnic_sre_macaddr_change(adapter,
  468. tmp_fil->faddr, tmp_fil->vlan_id,
  469. tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
  470. QLCNIC_MAC_DEL);
  471. spin_lock_bh(&adapter->mac_learn_lock);
  472. adapter->fhash.fnum--;
  473. hlist_del(&tmp_fil->fnode);
  474. spin_unlock_bh(&adapter->mac_learn_lock);
  475. kfree(tmp_fil);
  476. }
  477. }
  478. }
  479. }
/*
 * Remove every learned (MAC, VLAN) filter regardless of age: unprogram
 * each from the firmware and free its hash-table entry.
 */
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			/* VLAN-tagged filters need the VLAN-specific
			 * delete opcode. */
			qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
				tmp_fil->vlan_id, tmp_fil->vlan_id ?
				QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
			spin_lock_bh(&adapter->mac_learn_lock);
			adapter->fhash.fnum--;
			hlist_del(&tmp_fil->fnode);
			spin_unlock_bh(&adapter->mac_learn_lock);
			kfree(tmp_fil);
		}
	}
}
  500. static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
  501. {
  502. struct qlcnic_nic_req req;
  503. int rv;
  504. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  505. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  506. req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
  507. ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
  508. req.words[0] = cpu_to_le64(flag);
  509. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  510. if (rv != 0)
  511. dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
  512. flag ? "Set" : "Reset");
  513. return rv;
  514. }
/*
 * Enter firmware loopback: enable loopback, then force promiscuous
 * receive so looped-back frames are not dropped. If promisc setup fails,
 * the loopback setting is rolled back. The trailing sleep presumably
 * lets the firmware settle into the new mode — TODO confirm.
 */
int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
	if (qlcnic_set_fw_loopback(adapter, mode))
		return -EIO;

	if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
		qlcnic_set_fw_loopback(adapter, 0);
		return -EIO;
	}

	msleep(1000);
	return 0;
}
  526. void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
  527. {
  528. int mode = VPORT_MISS_MODE_DROP;
  529. struct net_device *netdev = adapter->netdev;
  530. qlcnic_set_fw_loopback(adapter, 0);
  531. if (netdev->flags & IFF_PROMISC)
  532. mode = VPORT_MISS_MODE_ACCEPT_ALL;
  533. else if (netdev->flags & IFF_ALLMULTI)
  534. mode = VPORT_MISS_MODE_ACCEPT_MULTI;
  535. qlcnic_nic_set_promisc(adapter, mode);
  536. msleep(1000);
  537. }
/*
 * Send the interrupt coalescing parameters set by ethtool to the card.
 *
 * Payload layout (from the assignments below): word 0 carries coal.flag
 * in its upper 32 bits; word 2 packs rx_packets with rx_time_us << 16;
 * word 5 packs timer_out with type << 32 and sts_ring_mask << 40.
 */
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
	req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
		((u64) adapter->portnum << 16));

	req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
	req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
			((u64) adapter->ahw->coal.rx_time_us) << 16);
	req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
			((u64) adapter->ahw->coal.type) << 32 |
			((u64) adapter->ahw->coal.sts_ring_mask) << 40);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send interrupt coalescing parameters\n");
	return rv;
}
  561. int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
  562. {
  563. struct qlcnic_nic_req req;
  564. u64 word;
  565. int rv;
  566. if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
  567. return 0;
  568. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  569. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  570. word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
  571. req.req_hdr = cpu_to_le64(word);
  572. req.words[0] = cpu_to_le64(enable);
  573. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  574. if (rv != 0)
  575. dev_err(&adapter->netdev->dev,
  576. "Could not send configure hw lro request\n");
  577. return rv;
  578. }
  579. int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
  580. {
  581. struct qlcnic_nic_req req;
  582. u64 word;
  583. int rv;
  584. if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
  585. return 0;
  586. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  587. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  588. word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
  589. ((u64)adapter->portnum << 16);
  590. req.req_hdr = cpu_to_le64(word);
  591. req.words[0] = cpu_to_le64(enable);
  592. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  593. if (rv != 0)
  594. dev_err(&adapter->netdev->dev,
  595. "Could not send configure bridge mode request\n");
  596. adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
  597. return rv;
  598. }
#define RSS_HASHTYPE_IP_TCP 0x3

/*
 * Enable or disable receive-side scaling. Sends the RSS configuration
 * word followed by the 40-byte hash key (five 64-bit words); the
 * indirection-table mask is hard-wired to 0x7.
 */
int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	static const u64 key[] = {
		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
		0x255b0ec26d5a56daULL
	};

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *      5-4: hash_type_ipv4
	 *      7-6: hash_type_ipv6
	 *        8: enable
	 *        9: use indirection table
	 *    47-10: reserved
	 *    63-48: indirection table mask
	 */
	word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
  636. int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
  637. {
  638. struct qlcnic_nic_req req;
  639. struct qlcnic_ipaddr *ipa;
  640. u64 word;
  641. int rv;
  642. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  643. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  644. word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
  645. req.req_hdr = cpu_to_le64(word);
  646. req.words[0] = cpu_to_le64(cmd);
  647. ipa = (struct qlcnic_ipaddr *)&req.words[1];
  648. ipa->ipv4 = ip;
  649. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  650. if (rv != 0)
  651. dev_err(&adapter->netdev->dev,
  652. "could not notify %s IP 0x%x reuqest\n",
  653. (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
  654. return rv;
  655. }
  656. int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
  657. {
  658. struct qlcnic_nic_req req;
  659. u64 word;
  660. int rv;
  661. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  662. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  663. word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
  664. req.req_hdr = cpu_to_le64(word);
  665. req.words[0] = cpu_to_le64(enable | (enable << 8));
  666. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  667. if (rv != 0)
  668. dev_err(&adapter->netdev->dev,
  669. "could not configure link notification\n");
  670. return rv;
  671. }
  672. int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
  673. {
  674. struct qlcnic_nic_req req;
  675. u64 word;
  676. int rv;
  677. if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
  678. return 0;
  679. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  680. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  681. word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
  682. ((u64)adapter->portnum << 16) |
  683. ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
  684. req.req_hdr = cpu_to_le64(word);
  685. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  686. if (rv != 0)
  687. dev_err(&adapter->netdev->dev,
  688. "could not cleanup lro flows\n");
  689. return rv;
  690. }
  691. /*
  692. * qlcnic_change_mtu - Change the Maximum Transfer Unit
  693. * @returns 0 on success, negative on failure
  694. */
  695. int qlcnic_change_mtu(struct net_device *netdev, int mtu)
  696. {
  697. struct qlcnic_adapter *adapter = netdev_priv(netdev);
  698. int rc = 0;
  699. if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
  700. dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
  701. " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
  702. return -EINVAL;
  703. }
  704. rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
  705. if (!rc)
  706. netdev->mtu = mtu;
  707. return rc;
  708. }
  709. netdev_features_t qlcnic_fix_features(struct net_device *netdev,
  710. netdev_features_t features)
  711. {
  712. struct qlcnic_adapter *adapter = netdev_priv(netdev);
  713. if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
  714. netdev_features_t changed = features ^ netdev->features;
  715. features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
  716. }
  717. if (!(features & NETIF_F_RXCSUM))
  718. features &= ~NETIF_F_LRO;
  719. return features;
  720. }
/*
 * Apply a feature change; only the LRO bit is acted on here.
 * Returns 0 when LRO is unchanged or on success, -EIO on firmware failure.
 */
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;

	if (!(changed & NETIF_F_LRO))
		return 0;

	/* NOTE(review): this stores the requested set with LRO toggled back,
	 * rather than flipping only the LRO bit of netdev->features — the
	 * other bits are taken from 'features'; confirm that is intended. */
	netdev->features = features ^ NETIF_F_LRO;

	if (qlcnic_config_hw_lro(adapter, hw_lro))
		return -EIO;

	/* when LRO is being disabled, also flush existing flows in firmware */
	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
		return -EIO;

	return 0;
}
  735. /*
  736. * Changes the CRB window to the specified window.
  737. */
  738. /* Returns < 0 if off is not valid,
  739. * 1 if window access is needed. 'off' is set to offset from
  740. * CRB space in 128M pci map
  741. * 0 if no window access is needed. 'off' is set to 2M addr
  742. * In: 'off' is offset from base in 128M pci map
  743. */
  744. static int
  745. qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
  746. ulong off, void __iomem **addr)
  747. {
  748. const struct crb_128M_2M_sub_block_map *m;
  749. if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
  750. return -EINVAL;
  751. off -= QLCNIC_PCI_CRBSPACE;
  752. /*
  753. * Try direct map
  754. */
  755. m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
  756. if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
  757. *addr = adapter->ahw->pci_base0 + m->start_2M +
  758. (off - m->start_128M);
  759. return 0;
  760. }
  761. /*
  762. * Not in direct map, use crb window
  763. */
  764. *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
  765. return 1;
  766. }
  767. /*
  768. * In: 'off' is offset from CRB space in 128M pci map
  769. * Out: 'off' is 2M pci map addr
  770. * side effect: lock crb window
  771. */
  772. static int
  773. qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
  774. {
  775. u32 window;
  776. void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
  777. off -= QLCNIC_PCI_CRBSPACE;
  778. window = CRB_HI(off);
  779. if (window == 0) {
  780. dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
  781. return -EIO;
  782. }
  783. writel(window, addr);
  784. if (readl(addr) != window) {
  785. if (printk_ratelimit())
  786. dev_warn(&adapter->pdev->dev,
  787. "failed to set CRB window to %d off 0x%lx\n",
  788. window, off);
  789. return -EIO;
  790. }
  791. return 0;
  792. }
  793. int
  794. qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
  795. {
  796. unsigned long flags;
  797. int rv;
  798. void __iomem *addr = NULL;
  799. rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
  800. if (rv == 0) {
  801. writel(data, addr);
  802. return 0;
  803. }
  804. if (rv > 0) {
  805. /* indirect access */
  806. write_lock_irqsave(&adapter->ahw->crb_lock, flags);
  807. crb_win_lock(adapter);
  808. rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
  809. if (!rv)
  810. writel(data, addr);
  811. crb_win_unlock(adapter);
  812. write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
  813. return rv;
  814. }
  815. dev_err(&adapter->pdev->dev,
  816. "%s: invalid offset: 0x%016lx\n", __func__, off);
  817. dump_stack();
  818. return -EIO;
  819. }
  820. u32
  821. qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
  822. {
  823. unsigned long flags;
  824. int rv;
  825. u32 data = -1;
  826. void __iomem *addr = NULL;
  827. rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
  828. if (rv == 0)
  829. return readl(addr);
  830. if (rv > 0) {
  831. /* indirect access */
  832. write_lock_irqsave(&adapter->ahw->crb_lock, flags);
  833. crb_win_lock(adapter);
  834. if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
  835. data = readl(addr);
  836. crb_win_unlock(adapter);
  837. write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
  838. return data;
  839. }
  840. dev_err(&adapter->pdev->dev,
  841. "%s: invalid offset: 0x%016lx\n", __func__, off);
  842. dump_stack();
  843. return -1;
  844. }
  845. void __iomem *
  846. qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
  847. {
  848. void __iomem *addr = NULL;
  849. WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
  850. return addr;
  851. }
  852. static int
  853. qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
  854. u64 addr, u32 *start)
  855. {
  856. u32 window;
  857. window = OCM_WIN_P3P(addr);
  858. writel(window, adapter->ahw->ocm_win_crb);
  859. /* read back to flush */
  860. readl(adapter->ahw->ocm_win_crb);
  861. *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
  862. return 0;
  863. }
  864. static int
  865. qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
  866. u64 *data, int op)
  867. {
  868. void __iomem *addr;
  869. int ret;
  870. u32 start;
  871. mutex_lock(&adapter->ahw->mem_lock);
  872. ret = qlcnic_pci_set_window_2M(adapter, off, &start);
  873. if (ret != 0)
  874. goto unlock;
  875. addr = adapter->ahw->pci_base0 + start;
  876. if (op == 0) /* read */
  877. *data = readq(addr);
  878. else /* write */
  879. writeq(*data, addr);
  880. unlock:
  881. mutex_unlock(&adapter->ahw->mem_lock);
  882. return ret;
  883. }
  884. void
  885. qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
  886. {
  887. void __iomem *addr = adapter->ahw->pci_base0 +
  888. QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
  889. mutex_lock(&adapter->ahw->mem_lock);
  890. *data = readq(addr);
  891. mutex_unlock(&adapter->ahw->mem_lock);
  892. }
  893. void
  894. qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
  895. {
  896. void __iomem *addr = adapter->ahw->pci_base0 +
  897. QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
  898. mutex_lock(&adapter->ahw->mem_lock);
  899. writeq(data, addr);
  900. mutex_unlock(&adapter->ahw->mem_lock);
  901. }
#define MAX_CTL_CHECK 1000

/*
 * Write 64 bits of 'data' to adapter memory at 'off' through the MIU
 * test agent (QDR/DDR ranges) or directly via the OCM window (OCM0).
 * The agent operates on 16-byte lines, so the line containing 'off' is
 * first read, the untouched half preserved, and the line written back.
 * Returns 0 on success, -EIO on bad address/alignment or agent timeout.
 */
int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	/* align down to the agent's 16-byte line */
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	i = 0;
	/* first read the whole 16-byte line into the agent's data regs */
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE),
			(mem_crb + TEST_AGT_CTRL));

	/* poll for the read to complete */
	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		ret = -EIO;
		goto done;
	}

	/* copy the untouched 8-byte half of the line to the write regs ... */
	i = (off & 0xf) ? 0 : 2;
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
	/* ... and place the new 8 bytes in the targeted half */
	i = (off & 0xf) ? 2 : 0;
	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	/* kick off the line write and poll for completion */
	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
/*
 * Read 64 bits from adapter memory at 'off' via the MIU test agent
 * (QDR/DDR ranges) or directly through the OCM window (OCM0 range).
 * Returns 0 on success, -EIO on bad address/alignment or agent timeout.
 */
int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	/* the agent transfers 16-byte lines; align down */
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	/* poll until the agent finishes */
	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		/* pick upper or lower 8 bytes of the 16-byte line */
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if (off & 0xf)
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;

		val |= readl(mem_crb + off8);
		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
/*
 * Read and validate the board configuration from flash, record the board
 * type, and derive the port type (10G vs 1G) from it.
 * Returns 0 on success, -EIO on ROM read failure or bad magic.
 */
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw->board_type = board_type;

	/* A 4G_MM board with this GPIO strap low is treated as a 10G
	 * twisted-pair board for port-type purposes.  Note only the local
	 * 'board_type' is overridden; ahw->board_type keeps the value read
	 * from flash. */
	if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3P_HMEZ:
	case QLCNIC_BRDTYPE_P3P_XG_LOM:
	case QLCNIC_BRDTYPE_P3P_10G_CX4:
	case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3P_IMEZ:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3P_10G_XFP:
	case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3P_REF_QG:
	case QLCNIC_BRDTYPE_P3P_4_GB:
	case QLCNIC_BRDTYPE_P3P_4_GB_MM:
		adapter->ahw->port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3P_10G_TP:
		/* mixed board: first two ports are 10G, the rest 1G */
		adapter->ahw->port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
  1083. int
  1084. qlcnic_wol_supported(struct qlcnic_adapter *adapter)
  1085. {
  1086. u32 wol_cfg;
  1087. wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
  1088. if (wol_cfg & (1UL << adapter->portnum)) {
  1089. wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
  1090. if (wol_cfg & (1 << adapter->portnum))
  1091. return 1;
  1092. }
  1093. return 0;
  1094. }
  1095. int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
  1096. {
  1097. struct qlcnic_nic_req req;
  1098. int rv;
  1099. u64 word;
  1100. memset(&req, 0, sizeof(struct qlcnic_nic_req));
  1101. req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
  1102. word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
  1103. req.req_hdr = cpu_to_le64(word);
  1104. req.words[0] = cpu_to_le64((u64)rate << 32);
  1105. req.words[1] = cpu_to_le64(state);
  1106. rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
  1107. if (rv)
  1108. dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
  1109. return rv;
  1110. }
  1111. /* FW dump related functions */
  1112. static u32
  1113. qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1114. u32 *buffer)
  1115. {
  1116. int i;
  1117. u32 addr, data;
  1118. struct __crb *crb = &entry->region.crb;
  1119. void __iomem *base = adapter->ahw->pci_base0;
  1120. addr = crb->addr;
  1121. for (i = 0; i < crb->no_ops; i++) {
  1122. qlcnic_read_dump_reg(addr, base, &data);
  1123. *buffer++ = cpu_to_le32(addr);
  1124. *buffer++ = cpu_to_le32(data);
  1125. addr += crb->stride;
  1126. }
  1127. return crb->no_ops * 2 * sizeof(u32);
  1128. }
/*
 * Execute a CRB "control" template entry: for each of no_ops addresses,
 * apply the opcode bits set in ctr->opcode (write, read-modify-write,
 * and/or, poll, save/restore through the template's saved_state array).
 * Returns 0 on success.  NOTE(review): on poll timeout this returns
 * -EINVAL from a u32-returning function, so callers see a large positive
 * value — confirm callers treat any nonzero result as failure.
 */
static u32
qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i, k, timeout = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	u32 addr, data;
	u8 opcode, no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		k = 0;
		opcode = 0;
		/* apply each opcode bit in turn, LSB first */
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				/* plain register write of val1 */
				qlcnic_write_dump_reg(addr, base, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				/* read then write back the same value */
				qlcnic_read_dump_reg(addr, base, &data);
				qlcnic_write_dump_reg(addr, base, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				/* clear bits: reg &= val2 */
				qlcnic_read_dump_reg(addr, base, &data);
				qlcnic_write_dump_reg(addr, base,
					data & ctr->val2);
				break;
			case QLCNIC_DUMP_ORCRB:
				/* set bits: reg |= val3 */
				qlcnic_read_dump_reg(addr, base, &data);
				qlcnic_write_dump_reg(addr, base,
					data | ctr->val3);
				break;
			case QLCNIC_DUMP_POLLCRB:
				/* poll until (reg & val2) == val1; note
				 * 'timeout' is not reset between ops, so the
				 * budget is shared across the whole entry */
				while (timeout <= ctr->timeout) {
					qlcnic_read_dump_reg(addr, base, &data);
					if ((data & ctr->val2) == ctr->val1)
						break;
					msleep(1);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
					"Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				/* read (optionally indirected addr) into the
				 * template's saved-state slot index_v */
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				qlcnic_read_dump_reg(addr, base, &data);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				/* write a saved value (or immediate val1) to
				 * an (optionally indirected) address */
				if (ctr->index_v)
					data = t_hdr->saved_state[ctr->index_v];
				else
					data = ctr->val1;
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				qlcnic_write_dump_reg(addr, base, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				/* shift/mask/offset-transform a saved value
				 * in place */
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					"Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}
  1214. static u32
  1215. qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1216. u32 *buffer)
  1217. {
  1218. int loop;
  1219. u32 val, data = 0;
  1220. struct __mux *mux = &entry->region.mux;
  1221. void __iomem *base = adapter->ahw->pci_base0;
  1222. val = mux->val;
  1223. for (loop = 0; loop < mux->no_ops; loop++) {
  1224. qlcnic_write_dump_reg(mux->addr, base, val);
  1225. qlcnic_read_dump_reg(mux->read_addr, base, &data);
  1226. *buffer++ = cpu_to_le32(val);
  1227. *buffer++ = cpu_to_le32(data);
  1228. val += mux->val_stride;
  1229. }
  1230. return 2 * mux->no_ops * sizeof(u32);
  1231. }
  1232. static u32
  1233. qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1234. u32 *buffer)
  1235. {
  1236. int i, loop;
  1237. u32 cnt, addr, data, que_id = 0;
  1238. void __iomem *base = adapter->ahw->pci_base0;
  1239. struct __queue *que = &entry->region.que;
  1240. addr = que->read_addr;
  1241. cnt = que->read_addr_cnt;
  1242. for (loop = 0; loop < que->no_ops; loop++) {
  1243. qlcnic_write_dump_reg(que->sel_addr, base, que_id);
  1244. addr = que->read_addr;
  1245. for (i = 0; i < cnt; i++) {
  1246. qlcnic_read_dump_reg(addr, base, &data);
  1247. *buffer++ = cpu_to_le32(data);
  1248. addr += que->read_addr_stride;
  1249. }
  1250. que_id += que->stride;
  1251. }
  1252. return que->no_ops * cnt * sizeof(u32);
  1253. }
  1254. static u32
  1255. qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
  1256. u32 *buffer)
  1257. {
  1258. int i;
  1259. u32 data;
  1260. void __iomem *addr;
  1261. struct __ocm *ocm = &entry->region.ocm;
  1262. addr = adapter->ahw->pci_base0 + ocm->read_addr;
  1263. for (i = 0; i < ocm->no_ops; i++) {
  1264. data = readl(addr);
  1265. *buffer++ = cpu_to_le32(data);
  1266. addr += ocm->read_addr_stride;
  1267. }
  1268. return ocm->no_ops * sizeof(u32);
  1269. }
/*
 * Capture rom->size bytes of flash into the dump buffer, 32 bits at a
 * time through the flash ROM window.  Returns rom->size.
 */
static u32
qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	fl_addr = rom->addr;
	size = rom->size/4;
lock_try:
	/* Reading the semaphore register attempts to take the flash lock.
	 * NOTE(review): after MAX_CTL_CHECK failed attempts the code
	 * proceeds WITHOUT the lock rather than bailing out — confirm
	 * that best-effort behavior is intended. */
	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		msleep(10);
		count++;
		goto lock_try;
	}
	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
	for (i = 0; i < size; i++) {
		/* window selects the upper 16 address bits; data register
		 * is indexed by the lower 16 */
		addr = fl_addr & 0xFFFF0000;
		qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		qlcnic_read_dump_reg(addr, base, &val);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	/* reading the unlock register releases the semaphore */
	readl(base + QLCNIC_FLASH_SEM2_ULK);
	return rom->size;
}
  1299. static u32
  1300. qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  1301. struct qlcnic_dump_entry *entry, u32 *buffer)
  1302. {
  1303. int i;
  1304. u32 cnt, val, data, addr;
  1305. void __iomem *base = adapter->ahw->pci_base0;
  1306. struct __cache *l1 = &entry->region.cache;
  1307. val = l1->init_tag_val;
  1308. for (i = 0; i < l1->no_ops; i++) {
  1309. qlcnic_write_dump_reg(l1->addr, base, val);
  1310. qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
  1311. addr = l1->read_addr;
  1312. cnt = l1->read_addr_num;
  1313. while (cnt) {
  1314. qlcnic_read_dump_reg(addr, base, &data);
  1315. *buffer++ = cpu_to_le32(data);
  1316. addr += l1->read_addr_stride;
  1317. cnt--;
  1318. }
  1319. val += l1->stride;
  1320. }
  1321. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  1322. }
  1323. static u32
  1324. qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
  1325. struct qlcnic_dump_entry *entry, u32 *buffer)
  1326. {
  1327. int i;
  1328. u32 cnt, val, data, addr;
  1329. u8 poll_mask, poll_to, time_out = 0;
  1330. void __iomem *base = adapter->ahw->pci_base0;
  1331. struct __cache *l2 = &entry->region.cache;
  1332. val = l2->init_tag_val;
  1333. poll_mask = LSB(MSW(l2->ctrl_val));
  1334. poll_to = MSB(MSW(l2->ctrl_val));
  1335. for (i = 0; i < l2->no_ops; i++) {
  1336. qlcnic_write_dump_reg(l2->addr, base, val);
  1337. if (LSW(l2->ctrl_val))
  1338. qlcnic_write_dump_reg(l2->ctrl_addr, base,
  1339. LSW(l2->ctrl_val));
  1340. if (!poll_mask)
  1341. goto skip_poll;
  1342. do {
  1343. qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
  1344. if (!(data & poll_mask))
  1345. break;
  1346. msleep(1);
  1347. time_out++;
  1348. } while (time_out <= poll_to);
  1349. if (time_out > poll_to) {
  1350. dev_err(&adapter->pdev->dev,
  1351. "Timeout exceeded in %s, aborting dump\n",
  1352. __func__);
  1353. return -EINVAL;
  1354. }
  1355. skip_poll:
  1356. addr = l2->read_addr;
  1357. cnt = l2->read_addr_num;
  1358. while (cnt) {
  1359. qlcnic_read_dump_reg(addr, base, &data);
  1360. *buffer++ = cpu_to_le32(data);
  1361. addr += l2->read_addr_stride;
  1362. cnt--;
  1363. }
  1364. val += l2->stride;
  1365. }
  1366. return l2->no_ops * l2->read_addr_num * sizeof(u32);
  1367. }
  1368. static u32
  1369. qlcnic_read_memory(struct qlcnic_adapter *adapter,
  1370. struct qlcnic_dump_entry *entry, u32 *buffer)
  1371. {
  1372. u32 addr, data, test, ret = 0;
  1373. int i, reg_read;
  1374. struct __mem *mem = &entry->region.mem;
  1375. void __iomem *base = adapter->ahw->pci_base0;
  1376. reg_read = mem->size;
  1377. addr = mem->addr;
  1378. /* check for data size of multiple of 16 and 16 byte alignment */
  1379. if ((addr & 0xf) || (reg_read%16)) {
  1380. dev_info(&adapter->pdev->dev,
  1381. "Unaligned memory addr:0x%x size:0x%x\n",
  1382. addr, reg_read);
  1383. return -EINVAL;
  1384. }
  1385. mutex_lock(&adapter->ahw->mem_lock);
  1386. while (reg_read != 0) {
  1387. qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
  1388. qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
  1389. qlcnic_write_dump_reg(MIU_TEST_CTR, base,
  1390. TA_CTL_ENABLE | TA_CTL_START);
  1391. for (i = 0; i < MAX_CTL_CHECK; i++) {
  1392. qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
  1393. if (!(test & TA_CTL_BUSY))
  1394. break;
  1395. }
  1396. if (i == MAX_CTL_CHECK) {
  1397. if (printk_ratelimit()) {
  1398. dev_err(&adapter->pdev->dev,
  1399. "failed to read through agent\n");
  1400. ret = -EINVAL;
  1401. goto out;
  1402. }
  1403. }
  1404. for (i = 0; i < 4; i++) {
  1405. qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
  1406. &data);
  1407. *buffer++ = cpu_to_le32(data);
  1408. }
  1409. addr += 16;
  1410. reg_read -= 16;
  1411. ret += 16;
  1412. }
  1413. out:
  1414. mutex_unlock(&adapter->ahw->mem_lock);
  1415. return mem->size;
  1416. }
  1417. static u32
  1418. qlcnic_dump_nop(struct qlcnic_adapter *adapter,
  1419. struct qlcnic_dump_entry *entry, u32 *buffer)
  1420. {
  1421. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  1422. return 0;
  1423. }
/* Dispatch table: dump-template entry opcode -> capture handler. */
static const struct qlcnic_dump_operations fw_dump_ops[] = {
	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
};
  1446. /* Walk the template and collect dump for each entry in the dump template */
  1447. static int
  1448. qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
  1449. u32 size)
  1450. {
  1451. int ret = 1;
  1452. if (size != entry->hdr.cap_size) {
  1453. dev_info(dev,
  1454. "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  1455. entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
  1456. dev_info(dev, "Aborting further dump capture\n");
  1457. ret = 0;
  1458. }
  1459. return ret;
  1460. }
/*
 * Capture a firmware dump by walking the dump template: size the data
 * area from the driver capability mask, allocate it, then invoke the
 * matching fw_dump_ops handler for each enabled template entry.  On
 * success the dump is retained (fw_dump->clr set) and a udev event
 * announces its availability.
 * Returns 0 on success, -EIO/-ENOMEM/-EINVAL on failure.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	u32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;

	/* refuse to overwrite a dump user space has not yet retrieved */
	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			"Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data) {
		dev_info(&adapter->pdev->dev,
			"Unable to allocate (%d KB) for fw dump\n",
			dump_size/1024);
		return -ENOMEM;
	}
	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	ops_cnt = ARRAY_SIZE(fw_dump_ops);
	entry_offset = tmpl_hdr->offset;
	/* stamp driver and firmware versions into the template header */
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		/* skip entries the driver's capability mask excludes */
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}
		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}
		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				"Invalid entry type %d, exiting dump\n",
				entry->hdr.type);
			goto error;
		}
		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		/* mark entries whose captured size disagrees with the
		 * template's cap_size as skipped */
		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
			dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		/* advance by cap_size regardless, keeping later entries at
		 * their template-assigned buffer offsets */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}
	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			"Captured(%d) and expected size(%d) do not match\n",
			buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
			fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	vfree(fw_dump->data);
	return -EINVAL;
}