e1000_82575.c

/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* e1000_82575
 * e1000_82576
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/i2c.h>

#include "e1000_mac.h"
#include "e1000_82575.h"
#include "e1000_i210.h"
static s32 igb_get_invariants_82575(struct e1000_hw *);
static s32 igb_acquire_phy_82575(struct e1000_hw *);
static void igb_release_phy_82575(struct e1000_hw *);
static s32 igb_acquire_nvm_82575(struct e1000_hw *);
static void igb_release_nvm_82575(struct e1000_hw *);
static s32 igb_check_for_link_82575(struct e1000_hw *);
static s32 igb_get_cfg_done_82575(struct e1000_hw *);
static s32 igb_init_hw_82575(struct e1000_hw *);
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
static s32 igb_reset_hw_82575(struct e1000_hw *);
static s32 igb_reset_hw_82580(struct e1000_hw *);
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
					      u16 *);
static s32 igb_get_phy_id_82575(struct e1000_hw *);
static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
static bool igb_sgmii_active_82575(struct e1000_hw *);
static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);

static const u16 e1000_82580_rxpbs_table[] =
	{ 36, 72, 144, 1, 2, 4, 8, 16,
	  35, 70, 140 };
#define E1000_82580_RXPBS_TABLE_SIZE \
	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
/**
 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
 * @hw: pointer to the HW structure
 *
 * Called to determine if the I2C pins are being used for I2C or as an
 * external MDIO interface since the two options are mutually exclusive.
 **/
static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
	u32 reg = 0;
	bool ext_mdio = false;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_82576:
		reg = rd32(E1000_MDIC);
		ext_mdio = !!(reg & E1000_MDIC_DEST);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		reg = rd32(E1000_MDICNFG);
		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
		break;
	default:
		break;
	}
	return ext_mdio;
}

/**
 * igb_init_phy_params_82575 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else {
		switch (hw->mac.type) {
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
			phy->ops.read_reg = igb_read_phy_reg_82580;
			phy->ops.write_reg = igb_write_phy_reg_82580;
			break;
		case e1000_i210:
		case e1000_i211:
			phy->ops.read_reg = igb_read_phy_reg_gs40g;
			phy->ops.write_reg = igb_write_phy_reg_gs40g;
			break;
		default:
			phy->ops.read_reg = igb_read_phy_reg_igp;
			phy->ops.write_reg = igb_write_phy_reg_igp;
		}
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
		       E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1545_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		if (phy->id != M88E1111_I_PHY_ID)
			phy->ops.get_cable_length =
				igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex =
			igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		break;
	case I210_I_PHY_ID:
		phy->type = e1000_phy_i210;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * igb_init_nvm_params_82575 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u16 size;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	if (hw->mac.type < e1000_i210) {
		nvm->opcode_bits = 8;
		nvm->delay_usec = 1;

		switch (nvm->override) {
		case e1000_nvm_override_spi_large:
			nvm->page_size = 32;
			nvm->address_bits = 16;
			break;
		case e1000_nvm_override_spi_small:
			nvm->page_size = 8;
			nvm->address_bits = 8;
			break;
		default:
			nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
			nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
					    16 : 8;
			break;
		}
		if (nvm->word_size == (1 << 15))
			nvm->page_size = 128;

		nvm->type = e1000_nvm_eeprom_spi;
	} else {
		nvm->type = e1000_nvm_flash_hw;
	}

	/* NVM Function Pointers */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		nvm->ops.acquire = igb_acquire_nvm_82575;
		nvm->ops.release = igb_release_nvm_82575;
		if (nvm->word_size < (1 << 15))
			nvm->ops.read = igb_read_nvm_eerd;
		else
			nvm->ops.read = igb_read_nvm_spi;
		nvm->ops.write = igb_write_nvm_spi;
		break;
	case e1000_i354:
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		nvm->ops.acquire = igb_acquire_nvm_82575;
		nvm->ops.release = igb_release_nvm_82575;
		if (nvm->word_size < (1 << 15))
			nvm->ops.read = igb_read_nvm_eerd;
		else
			nvm->ops.read = igb_read_nvm_spi;
		nvm->ops.write = igb_write_nvm_spi;
		break;
	case e1000_i210:
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
		nvm->ops.acquire = igb_acquire_nvm_i210;
		nvm->ops.release = igb_release_nvm_i210;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.valid_led_default = igb_valid_led_default_i210;
		break;
	case e1000_i211:
		nvm->ops.acquire = igb_acquire_nvm_i210;
		nvm->ops.release = igb_release_nvm_i210;
		nvm->ops.read = igb_read_nvm_i211;
		nvm->ops.valid_led_default = igb_valid_led_default_i210;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
		nvm->ops.write = NULL;
		break;
	default:
		nvm->ops.validate = igb_validate_nvm_checksum;
		nvm->ops.update = igb_update_nvm_checksum;
		nvm->ops.acquire = igb_acquire_nvm_82575;
		nvm->ops.release = igb_release_nvm_82575;
		if (nvm->word_size < (1 << 15))
			nvm->ops.read = igb_read_nvm_eerd;
		else
			nvm->ops.read = igb_read_nvm_spi;
		nvm->ops.write = igb_write_nvm_spi;
		break;
	}

	return 0;
}
/**
 * igb_init_mac_params_82575 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 **/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	switch (mac->type) {
	case e1000_82576:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
		break;
	case e1000_82580:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
		break;
	case e1000_i350:
	case e1000_i354:
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		break;
	default:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
		break;
	}
	/* reset */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;

	if (mac->type >= e1000_i210) {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
	} else {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts and later parts */
	if (mac->type >= e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* Allow a single clear of the SW semaphore on I210 and newer */
	if (mac->type >= e1000_i210)
		dev_spec->clear_semaphore_once = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	return 0;
}

static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	s32 ret_val;
	u32 ctrl_ext = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	case E1000_DEV_ID_I210_COPPER:
	case E1000_DEV_ID_I210_COPPER_OEM1:
	case E1000_DEV_ID_I210_COPPER_IT:
	case E1000_DEV_ID_I210_FIBER:
	case E1000_DEV_ID_I210_SERDES:
	case E1000_DEV_ID_I210_SGMII:
		mac->type = e1000_i210;
		break;
	case E1000_DEV_ID_I211_COPPER:
		mac->type = e1000_i211;
		break;
	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
	case E1000_DEV_ID_I354_SGMII:
	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
		mac->type = e1000_i354;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
		break;
	}

	/* Set media type */
	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	hw->phy.media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		dev_spec->sgmii_active = true;
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	default:
		break;
	}

	/* mac initialization and operations */
	ret_val = igb_init_mac_params_82575(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igb_init_nvm_params_82575(hw);
	if (ret_val)
		goto out;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igb_init_phy_params_82575(hw);

out:
	return ret_val;
}
/**
 * igb_acquire_phy_82575 - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igb_release_phy_82575 - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY. This is a
 * function pointer entry point called by the api module.
 **/
static void igb_release_phy_82575(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the serial gigabit media independent
 * interface and stores the retrieved information in data.
 **/
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					u16 *data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %u is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}

/**
 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset using the serial gigabit
 * media independent interface.
 **/
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
					 u16 data)
{
	s32 ret_val = -E1000_ERR_PARAM;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		hw_dbg("PHY Address %d is out of range\n", offset);
		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_write_phy_reg_i2c(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return ret_val;
}
/**
 * igb_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHYs which do and do not use
 * the sgmii interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	/* For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works. For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work. The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
		case e1000_i210:
		case e1000_i211:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/* At the time of this writing, the M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}

/**
 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY using the serial gigabit media independent interface.
 **/
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	/* This isn't a true "hard" reset, but is the only reset
	 * available to us at this time.
	 */
	hw_dbg("Soft resetting SGMII attached PHY...\n");

	/* SFP documentation requires the following to configure the SFP module
	 * to work on SGMII. No further documentation is given.
	 */
	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
	if (ret_val)
		goto out;

	ret_val = igb_phy_sw_reset(hw);

out:
	return ret_val;
}
/**
 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		goto out;

	if (active) {
		data |= IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		if (ret_val)
			goto out;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
					     data);
		if (ret_val)
			goto out;
	} else {
		data &= ~IGP02E1000_PM_D0_LPLU;
		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
					     data);
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, &data);
			if (ret_val)
				goto out;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
					IGP01E1000_PHY_PORT_CONFIG, data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU D0 state according to the active flag. When
 * activating LPLU this function also disables smart speed
 * and vice versa. LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (active) {
		data |= E1000_82580_PM_D0_LPLU;

		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	} else {
		data &= ~E1000_82580_PM_D0_LPLU;

		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}

/**
 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D3
 * and SmartSpeed is disabled when active is true, else clear lplu for D3
 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
 * is used during Dx states where the power conservation is most important.
 * During driver activity, SmartSpeed should be enabled so performance is
 * maintained.
 **/
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
		 * during Dx states where the power conservation is most
		 * important. During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return ret_val;
}
/**
 * igb_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	if (ret_val)
		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}
/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
/**
 * igb_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status. NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * 0. If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		msleep(1);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return ret_val;
}

/**
 * igb_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not. If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}
/**
 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	msleep(1);
}

/**
 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state. For non-copper mode,
	 * the status register is not accurate. The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg. The sync ok
	 * gets set once both sides sync up and agree upon link. Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
			*speed = SPEED_1000;
		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
			*speed = SPEED_100;
		} else {
			*speed = SPEED_10;
		}

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
			*duplex = FULL_DUPLEX;
		} else {
			*duplex = HALF_DUPLEX;
		}
	}

	return 0;
}

/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		msleep(1);
	}
}
/**
 * igb_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state. This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl, icr;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val) {
		hw_dbg("PCI-E Set completion timeout has failed.\n");
	}

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}

/**
 * igb_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read). It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}
/**
 * igb_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link; once link is established, calls to configure collision distance
 * and flow control are made.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit */
	if (hw->mac.type >= e1000_82580) {
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}

	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1545_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = igb_setup_copper_link(hw);

out:
	return ret_val;
}
  1220. /**
  1221. * igb_setup_serdes_link_82575 - Setup link for serdes
  1222. * @hw: pointer to the HW structure
  1223. *
  1224. * Configure the physical coding sub-layer (PCS) link. The PCS link is
  1225. * used on copper connections where the serialized gigabit media independent
  1226. * interface (sgmii), or serdes fiber is being used. Configures the link
  1227. * for auto-negotiation or forces speed/duplex.
  1228. **/
  1229. static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
  1230. {
  1231. u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
  1232. bool pcs_autoneg;
  1233. s32 ret_val = E1000_SUCCESS;
  1234. u16 data;
  1235. if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
  1236. !igb_sgmii_active_82575(hw))
  1237. return ret_val;
  1238. /* On the 82575, SerDes loopback mode persists until it is
  1239. * explicitly turned off or a power cycle is performed. A read to
  1240. * the register does not indicate its status. Therefore, we ensure
  1241. * loopback mode is disabled during initialization.
  1242. */
  1243. wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
  1244. /* power on the sfp cage if present and turn on I2C */
  1245. ctrl_ext = rd32(E1000_CTRL_EXT);
  1246. ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
  1247. ctrl_ext |= E1000_CTRL_I2C_ENA;
  1248. wr32(E1000_CTRL_EXT, ctrl_ext);
  1249. ctrl_reg = rd32(E1000_CTRL);
  1250. ctrl_reg |= E1000_CTRL_SLU;
  1251. if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
  1252. /* set both sw defined pins */
  1253. ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
  1254. /* Set switch control to serdes energy detect */
  1255. reg = rd32(E1000_CONNSW);
  1256. reg |= E1000_CONNSW_ENRGSRC;
  1257. wr32(E1000_CONNSW, reg);
  1258. }
  1259. reg = rd32(E1000_PCS_LCTL);
  1260. /* default pcs_autoneg to the same setting as mac autoneg */
  1261. pcs_autoneg = hw->mac.autoneg;
  1262. switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
  1263. case E1000_CTRL_EXT_LINK_MODE_SGMII:
  1264. /* sgmii mode lets the phy handle forcing speed/duplex */
  1265. pcs_autoneg = true;
  1266. /* autoneg time out should be disabled for SGMII mode */
  1267. reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
  1268. break;
  1269. case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
  1270. /* disable PCS autoneg and support parallel detect only */
  1271. pcs_autoneg = false;
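  /* fall through - 1000BASE-KX uses the forced 1000/Full path below */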
  1272. default:
  1273. if (hw->mac.type == e1000_82575 ||
  1274. hw->mac.type == e1000_82576) {
  1275. ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
  1276. if (ret_val) {
  1277. hw_dbg("NVM Read Error\n");
  1278. return ret_val;
  1279. }
  1280. if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
  1281. pcs_autoneg = false;
  1282. }
  1283. /* non-SGMII modes only support a speed of 1000/Full for the
  1284. * link, so it is best to just force the MAC and let the PCS
  1285. * link either autoneg or be forced to 1000/Full
  1286. */
  1287. ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
  1288. E1000_CTRL_FD | E1000_CTRL_FRCDPX;
  1289. /* set speed of 1000/Full if speed/duplex is forced */
  1290. reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
  1291. break;
  1292. }
  1293. wr32(E1000_CTRL, ctrl_reg);
  1294. /* New SerDes mode allows for forcing speed or autonegotiating speed
  1295. * at 1gb. Autoneg should be the default set by most drivers. This is the
  1296. * mode that will be compatible with older link partners and switches.
  1297. * However, both modes are supported by the hardware and some drivers/tools.
  1298. */
  1299. reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
  1300. E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
  1301. if (pcs_autoneg) {
  1302. /* Set PCS register for autoneg */
  1303. reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
  1304. E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
  1305. /* Disable force flow control for autoneg */
  1306. reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
  1307. /* Configure flow control advertisement for autoneg */
  1308. anadv_reg = rd32(E1000_PCS_ANADV);
  1309. anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
  1310. switch (hw->fc.requested_mode) {
  1311. case e1000_fc_full:
  1312. case e1000_fc_rx_pause:
  1313. anadv_reg |= E1000_TXCW_ASM_DIR;
  1314. anadv_reg |= E1000_TXCW_PAUSE;
  1315. break;
  1316. case e1000_fc_tx_pause:
  1317. anadv_reg |= E1000_TXCW_ASM_DIR;
  1318. break;
  1319. default:
  1320. break;
  1321. }
  1322. wr32(E1000_PCS_ANADV, anadv_reg);
  1323. hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
  1324. } else {
  1325. /* Set PCS register for forced link */
  1326. reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
  1327. /* Force flow control for forced link */
  1328. reg |= E1000_PCS_LCTL_FORCE_FCTRL;
  1329. hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
  1330. }
  1331. wr32(E1000_PCS_LCTL, reg);
  1332. if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
  1333. igb_force_mac_fc(hw);
  1334. return ret_val;
  1335. }
  1336. /**
  1337. * igb_sgmii_active_82575 - Return sgmii state
  1338. * @hw: pointer to the HW structure
  1339. *
  1340. * 82575 silicon has a serialized gigabit media independent interface (sgmii)
  1341. * which can be enabled for use in embedded applications. Simply
  1342. * return the current state of the sgmii interface.
  1343. **/
  1344. static bool igb_sgmii_active_82575(struct e1000_hw *hw)
  1345. {
  1346. struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
  1347. return dev_spec->sgmii_active;
  1348. }
  1349. /**
  1350. * igb_reset_init_script_82575 - Inits HW defaults after reset
  1351. * @hw: pointer to the HW structure
  1352. *
  1353. * Inits recommended HW defaults after a reset when there is no EEPROM
  1354. * detected. This is only for the 82575.
  1355. **/
  1356. static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
  1357. {
  1358. if (hw->mac.type == e1000_82575) {
  1359. hw_dbg("Running reset init script for 82575\n");
  1360. /* SerDes configuration via SERDESCTRL */
  1361. igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
  1362. igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
  1363. igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
  1364. igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
  1365. /* CCM configuration via CCMCTL register */
  1366. igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
  1367. igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
  1368. /* PCIe lanes configuration */
  1369. igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
  1370. igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
  1371. igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
  1372. igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
  1373. /* PCIe PLL Configuration */
  1374. igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
  1375. igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
  1376. igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
  1377. }
  1378. return 0;
  1379. }
  1380. /**
  1381. * igb_read_mac_addr_82575 - Read device MAC address
  1382. * @hw: pointer to the HW structure
  1383. **/
  1384. static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
  1385. {
  1386. s32 ret_val = 0;
  1387. /* If there's an alternate MAC address, place it in RAR0
  1388. * so that it will override the silicon-installed default
  1389. * permanent address.
  1390. */
  1391. ret_val = igb_check_alt_mac_addr(hw);
  1392. if (ret_val)
  1393. goto out;
  1394. ret_val = igb_read_mac_addr(hw);
  1395. out:
  1396. return ret_val;
  1397. }
  1398. /**
  1399. * igb_power_down_phy_copper_82575 - Remove link during PHY power down
  1400. * @hw: pointer to the HW structure
  1401. *
  1402. * In the case of a PHY power down to save power, or to turn off link during a
  1403. * driver unload when wake on LAN is not enabled, remove the link.
  1404. **/
  1405. void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
  1406. {
  1407. /* If the management interface is not enabled, then power down */
  1408. if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
  1409. igb_power_down_phy_copper(hw);
  1410. }
  1411. /**
  1412. * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
  1413. * @hw: pointer to the HW structure
  1414. *
  1415. * Clears the hardware counters by reading the counter registers.
  1416. **/
  1417. static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
  1418. {
  1419. igb_clear_hw_cntrs_base(hw);
  1420. rd32(E1000_PRC64);
  1421. rd32(E1000_PRC127);
  1422. rd32(E1000_PRC255);
  1423. rd32(E1000_PRC511);
  1424. rd32(E1000_PRC1023);
  1425. rd32(E1000_PRC1522);
  1426. rd32(E1000_PTC64);
  1427. rd32(E1000_PTC127);
  1428. rd32(E1000_PTC255);
  1429. rd32(E1000_PTC511);
  1430. rd32(E1000_PTC1023);
  1431. rd32(E1000_PTC1522);
  1432. rd32(E1000_ALGNERRC);
  1433. rd32(E1000_RXERRC);
  1434. rd32(E1000_TNCRS);
  1435. rd32(E1000_CEXTERR);
  1436. rd32(E1000_TSCTC);
  1437. rd32(E1000_TSCTFC);
  1438. rd32(E1000_MGTPRC);
  1439. rd32(E1000_MGTPDC);
  1440. rd32(E1000_MGTPTC);
  1441. rd32(E1000_IAC);
  1442. rd32(E1000_ICRXOC);
  1443. rd32(E1000_ICRXPTC);
  1444. rd32(E1000_ICRXATC);
  1445. rd32(E1000_ICTXPTC);
  1446. rd32(E1000_ICTXATC);
  1447. rd32(E1000_ICTXQEC);
  1448. rd32(E1000_ICTXQMTC);
  1449. rd32(E1000_ICRXDMTC);
  1450. rd32(E1000_CBTMPC);
  1451. rd32(E1000_HTDPMC);
  1452. rd32(E1000_CBRMPC);
  1453. rd32(E1000_RPTHC);
  1454. rd32(E1000_HGPTC);
  1455. rd32(E1000_HTCBDPC);
  1456. rd32(E1000_HGORCL);
  1457. rd32(E1000_HGORCH);
  1458. rd32(E1000_HGOTCL);
  1459. rd32(E1000_HGOTCH);
  1460. rd32(E1000_LENERRS);
  1461. /* This register should not be read in copper configurations */
  1462. if (hw->phy.media_type == e1000_media_type_internal_serdes ||
  1463. igb_sgmii_active_82575(hw))
  1464. rd32(E1000_SCVPC);
  1465. }
  1466. /**
  1467. * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
  1468. * @hw: pointer to the HW structure
  1469. *
  1470. * After rx enable, if manageability is enabled then there is likely some
  1471. * bad data at the start of the fifo and possibly in the DMA fifo. This
  1472. * function clears the fifos and flushes any packets that came in while rx
  1473. * was being enabled.
  1474. **/
  1475. void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
  1476. {
  1477. u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
  1478. int i, ms_wait;
  1479. if (hw->mac.type != e1000_82575 ||
  1480. !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
  1481. return;
  1482. /* Disable all RX queues */
  1483. for (i = 0; i < 4; i++) {
  1484. rxdctl[i] = rd32(E1000_RXDCTL(i));
  1485. wr32(E1000_RXDCTL(i),
  1486. rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
  1487. }
  1488. /* Poll all queues to verify they have shut down */
  1489. for (ms_wait = 0; ms_wait < 10; ms_wait++) {
  1490. msleep(1);
  1491. rx_enabled = 0;
  1492. for (i = 0; i < 4; i++)
  1493. rx_enabled |= rd32(E1000_RXDCTL(i));
  1494. if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
  1495. break;
  1496. }
  1497. if (ms_wait == 10)
  1498. hw_dbg("Queue disable timed out after 10ms\n");
  1499. /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
  1500. * incoming packets are rejected. Set enable and wait 2ms so that
  1501. * any packet that was arriving while RCTL.EN was being set is flushed
  1502. */
  1503. rfctl = rd32(E1000_RFCTL);
  1504. wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
  1505. rlpml = rd32(E1000_RLPML);
  1506. wr32(E1000_RLPML, 0);
  1507. rctl = rd32(E1000_RCTL);
  1508. temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
  1509. temp_rctl |= E1000_RCTL_LPE;
  1510. wr32(E1000_RCTL, temp_rctl);
  1511. wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
  1512. wrfl();
  1513. msleep(2);
  1514. /* Enable RX queues that were previously enabled and restore our
  1515. * previous state
  1516. */
  1517. for (i = 0; i < 4; i++)
  1518. wr32(E1000_RXDCTL(i), rxdctl[i]);
  1519. wr32(E1000_RCTL, rctl);
  1520. wrfl();
  1521. wr32(E1000_RLPML, rlpml);
  1522. wr32(E1000_RFCTL, rfctl);
  1523. /* Flush receive errors generated by workaround */
  1524. rd32(E1000_ROC);
  1525. rd32(E1000_RNBC);
  1526. rd32(E1000_MPC);
  1527. }
  1528. /**
  1529. * igb_set_pcie_completion_timeout - set pci-e completion timeout
  1530. * @hw: pointer to the HW structure
  1531. *
  1532. * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
  1533. * however the hardware default for these parts is 500us to 1ms which is less
  1534. * than the 10ms recommended by the pci-e spec. To address this we need to
  1535. * increase the value to either 10ms to 200ms for capability version 1 config,
  1536. * or 16ms to 55ms for version 2.
  1537. **/
  1538. static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
  1539. {
  1540. u32 gcr = rd32(E1000_GCR);
  1541. s32 ret_val = 0;
  1542. u16 pcie_devctl2;
  1543. /* only take action if timeout value is defaulted to 0 */
  1544. if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
  1545. goto out;
  1546. /* if capabilities version is type 1 we can write the
  1547. * timeout of 10ms to 200ms through the GCR register
  1548. */
  1549. if (!(gcr & E1000_GCR_CAP_VER2)) {
  1550. gcr |= E1000_GCR_CMPL_TMOUT_10ms;
  1551. goto out;
  1552. }
  1553. /* for version 2 capabilities we need to write the config space
  1554. * directly in order to set the completion timeout value for
  1555. * 16ms to 55ms
  1556. */
  1557. ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
  1558. &pcie_devctl2);
  1559. if (ret_val)
  1560. goto out;
  1561. pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
  1562. ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
  1563. &pcie_devctl2);
  1564. out:
  1565. /* disable completion timeout resend */
  1566. gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
  1567. wr32(E1000_GCR, gcr);
  1568. return ret_val;
  1569. }
  1570. /**
  1571. * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
  1572. * @hw: pointer to the hardware struct
  1573. * @enable: state to enter, either enabled or disabled
  1574. * @pf: Physical Function pool - do not set anti-spoofing for the PF
  1575. *
  1576. * enables/disables L2 switch anti-spoofing functionality.
  1577. **/
  1578. void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
  1579. {
  1580. u32 reg_val, reg_offset;
  1581. switch (hw->mac.type) {
  1582. case e1000_82576:
  1583. reg_offset = E1000_DTXSWC;
  1584. break;
  1585. case e1000_i350:
  1586. case e1000_i354:
  1587. reg_offset = E1000_TXSWC;
  1588. break;
  1589. default:
  1590. return;
  1591. }
  1592. reg_val = rd32(reg_offset);
  1593. if (enable) {
  1594. reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
  1595. E1000_DTXSWC_VLAN_SPOOF_MASK);
  1596. /* The PF can spoof - it has to in order to
  1597. * support emulation mode NICs
  1598. */
  1599. reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
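  /* The XOR above clears only the PF's own MAC anti-spoof bit (pool 'pf')
   * and its VLAN anti-spoof bit (pf + MAX_NUM_VFS), which the mask write
   * just set, leaving anti-spoofing enabled for all VF pools.
   */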
  1600. } else {
  1601. reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
  1602. E1000_DTXSWC_VLAN_SPOOF_MASK);
  1603. }
  1604. wr32(reg_offset, reg_val);
  1605. }
  1606. /**
  1607. * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
  1608. * @hw: pointer to the hardware struct
  1609. * @enable: state to enter, either enabled or disabled
  1610. *
  1611. * enables/disables L2 switch loopback functionality.
  1612. **/
  1613. void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
  1614. {
  1615. u32 dtxswc;
  1616. switch (hw->mac.type) {
  1617. case e1000_82576:
  1618. dtxswc = rd32(E1000_DTXSWC);
  1619. if (enable)
  1620. dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
  1621. else
  1622. dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
  1623. wr32(E1000_DTXSWC, dtxswc);
  1624. break;
  1625. case e1000_i354:
  1626. case e1000_i350:
  1627. dtxswc = rd32(E1000_TXSWC);
  1628. if (enable)
  1629. dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
  1630. else
  1631. dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
  1632. wr32(E1000_TXSWC, dtxswc);
  1633. break;
  1634. default:
  1635. /* Currently no other hardware supports loopback */
  1636. break;
  1637. }
  1638. }
  1639. /**
  1640. * igb_vmdq_set_replication_pf - enable or disable vmdq replication
  1641. * @hw: pointer to the hardware struct
  1642. * @enable: state to enter, either enabled or disabled
  1643. *
  1644. * enables/disables replication of packets across multiple pools.
  1645. **/
  1646. void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
  1647. {
  1648. u32 vt_ctl = rd32(E1000_VT_CTL);
  1649. if (enable)
  1650. vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
  1651. else
  1652. vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
  1653. wr32(E1000_VT_CTL, vt_ctl);
  1654. }
  1655. /**
  1656. * igb_read_phy_reg_82580 - Read 82580 MDI control register
  1657. * @hw: pointer to the HW structure
  1658. * @offset: register offset to be read
  1659. * @data: pointer to the read data
  1660. *
  1661. * Reads the MDI control register in the PHY at offset and stores the
  1662. * information read to data.
  1663. **/
  1664. static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
  1665. {
  1666. s32 ret_val;
  1667. ret_val = hw->phy.ops.acquire(hw);
  1668. if (ret_val)
  1669. goto out;
  1670. ret_val = igb_read_phy_reg_mdic(hw, offset, data);
  1671. hw->phy.ops.release(hw);
  1672. out:
  1673. return ret_val;
  1674. }
  1675. /**
  1676. * igb_write_phy_reg_82580 - Write 82580 MDI control register
  1677. * @hw: pointer to the HW structure
  1678. * @offset: register offset to write to
  1679. * @data: data to write to register at offset
  1680. *
  1681. * Writes data to MDI control register in the PHY at offset.
  1682. **/
  1683. static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
  1684. {
  1685. s32 ret_val;
  1686. ret_val = hw->phy.ops.acquire(hw);
  1687. if (ret_val)
  1688. goto out;
  1689. ret_val = igb_write_phy_reg_mdic(hw, offset, data);
  1690. hw->phy.ops.release(hw);
  1691. out:
  1692. return ret_val;
  1693. }
  1694. /**
  1695. * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
  1696. * @hw: pointer to the HW structure
  1697. *
  1698. * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
  1699. * the values found in the EEPROM. This addresses an issue in which these
  1700. * bits are not restored from EEPROM after reset.
  1701. **/
  1702. static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
  1703. {
  1704. s32 ret_val = 0;
  1705. u32 mdicnfg;
  1706. u16 nvm_data = 0;
  1707. if (hw->mac.type != e1000_82580)
  1708. goto out;
  1709. if (!igb_sgmii_active_82575(hw))
  1710. goto out;
  1711. ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
  1712. NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
  1713. &nvm_data);
  1714. if (ret_val) {
  1715. hw_dbg("NVM Read Error\n");
  1716. goto out;
  1717. }
  1718. mdicnfg = rd32(E1000_MDICNFG);
  1719. if (nvm_data & NVM_WORD24_EXT_MDIO)
  1720. mdicnfg |= E1000_MDICNFG_EXT_MDIO;
  1721. if (nvm_data & NVM_WORD24_COM_MDIO)
  1722. mdicnfg |= E1000_MDICNFG_COM_MDIO;
  1723. wr32(E1000_MDICNFG, mdicnfg);
  1724. out:
  1725. return ret_val;
  1726. }
  1727. /**
  1728. * igb_reset_hw_82580 - Reset hardware
  1729. * @hw: pointer to the HW structure
  1730. *
  1731. * This resets the function or the entire device (all ports, etc.)
  1732. * to a known state.
  1733. **/
  1734. static s32 igb_reset_hw_82580(struct e1000_hw *hw)
  1735. {
  1736. s32 ret_val = 0;
  1737. /* BH SW mailbox bit in SW_FW_SYNC */
  1738. u16 swmbsw_mask = E1000_SW_SYNCH_MB;
  1739. u32 ctrl, icr;
  1740. bool global_device_reset = hw->dev_spec._82575.global_device_reset;
  1741. hw->dev_spec._82575.global_device_reset = false;
  1742. /* due to hw errata, global device reset doesn't always
  1743. * work on 82580
  1744. */
  1745. if (hw->mac.type == e1000_82580)
  1746. global_device_reset = false;
  1747. /* Get current control state. */
  1748. ctrl = rd32(E1000_CTRL);
  1749. /* Prevent the PCI-E bus from sticking if there is no TLP connection
  1750. * on the last TLP read/write transaction when MAC is reset.
  1751. */
  1752. ret_val = igb_disable_pcie_master(hw);
  1753. if (ret_val)
  1754. hw_dbg("PCI-E Master disable polling has failed.\n");
  1755. hw_dbg("Masking off all interrupts\n");
  1756. wr32(E1000_IMC, 0xffffffff);
  1757. wr32(E1000_RCTL, 0);
  1758. wr32(E1000_TCTL, E1000_TCTL_PSP);
  1759. wrfl();
  1760. msleep(10);
  1761. /* Determine whether or not a global dev reset is requested */
  1762. if (global_device_reset &&
  1763. hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
  1764. global_device_reset = false;
  1765. if (global_device_reset &&
  1766. !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
  1767. ctrl |= E1000_CTRL_DEV_RST;
  1768. else
  1769. ctrl |= E1000_CTRL_RST;
  1770. wr32(E1000_CTRL, ctrl);
  1771. wrfl();
  1772. /* Add delay to ensure DEV_RST has time to complete */
  1773. if (global_device_reset)
  1774. msleep(5);
  1775. ret_val = igb_get_auto_rd_done(hw);
  1776. if (ret_val) {
  1777. /* When auto config read does not complete, do not
  1778. * return with an error. This can happen in situations where there is
  1779. * no eeprom, and returning an error would prevent getting link.
  1780. */
  1781. hw_dbg("Auto Read Done did not complete\n");
  1782. }
  1783. /* If EEPROM is not present, run manual init scripts */
  1784. if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
  1785. igb_reset_init_script_82575(hw);
  1786. /* clear global device reset status bit */
  1787. wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
  1788. /* Clear any pending interrupt events. */
  1789. wr32(E1000_IMC, 0xffffffff);
  1790. icr = rd32(E1000_ICR);
  1791. ret_val = igb_reset_mdicnfg_82580(hw);
  1792. if (ret_val)
  1793. hw_dbg("Could not reset MDICNFG based on EEPROM\n");
  1794. /* Install any alternate MAC address into RAR0 */
  1795. ret_val = igb_check_alt_mac_addr(hw);
  1796. /* Release semaphore */
  1797. if (global_device_reset)
  1798. hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
  1799. return ret_val;
  1800. }
  1801. /**
  1802. * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
  1803. * @data: data received by reading RXPBS register
  1804. *
  1805. * The 82580 uses a table based approach for packet buffer allocation sizes.
  1806. * This function converts the retrieved value into the correct table value:
  1807. * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
  1808. * 0x0 36 72 144 1 2 4 8 16
  1809. * 0x8 35 70 140 rsv rsv rsv rsv rsv
  1810. */
  1811. u16 igb_rxpbs_adjust_82580(u32 data)
  1812. {
  1813. u16 ret_val = 0;
  1814. if (data < E1000_82580_RXPBS_TABLE_SIZE)
  1815. ret_val = e1000_82580_rxpbs_table[data];
  1816. return ret_val;
  1817. }
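  /* Illustrative readings based on the table above: a raw RXPBS field of
   * 0x2 is reported as 144 and 0x4 as 2, while any encoding at or beyond
   * E1000_82580_RXPBS_TABLE_SIZE is reported as 0.
   */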
  1818. /**
  1819. * igb_validate_nvm_checksum_with_offset - Validate EEPROM
  1820. * checksum
  1821. * @hw: pointer to the HW structure
  1822. * @offset: offset in words of the checksum protected region
  1823. *
  1824. * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
  1825. * and then verifies that the sum of the EEPROM is equal to 0xBABA.
  1826. **/
  1827. static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
  1828. u16 offset)
  1829. {
  1830. s32 ret_val = 0;
  1831. u16 checksum = 0;
  1832. u16 i, nvm_data;
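  /* Sum every word of the protected region, including the stored checksum
   * word at NVM_CHECKSUM_REG + offset; a valid image sums to NVM_SUM.
   */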
  1833. for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
  1834. ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
  1835. if (ret_val) {
  1836. hw_dbg("NVM Read Error\n");
  1837. goto out;
  1838. }
  1839. checksum += nvm_data;
  1840. }
  1841. if (checksum != (u16) NVM_SUM) {
  1842. hw_dbg("NVM Checksum Invalid\n");
  1843. ret_val = -E1000_ERR_NVM;
  1844. goto out;
  1845. }
  1846. out:
  1847. return ret_val;
  1848. }
  1849. /**
  1850. * igb_update_nvm_checksum_with_offset - Update EEPROM
  1851. * checksum
  1852. * @hw: pointer to the HW structure
  1853. * @offset: offset in words of the checksum protected region
  1854. *
  1855. * Updates the EEPROM checksum by reading/adding each word of the EEPROM
  1856. * up to the checksum. Then calculates the EEPROM checksum and writes the
  1857. * value to the EEPROM.
  1858. **/
  1859. static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
  1860. {
  1861. s32 ret_val;
  1862. u16 checksum = 0;
  1863. u16 i, nvm_data;
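  /* Sum the words before the checksum location, then write NVM_SUM minus
   * that sum so the whole protected region again sums to NVM_SUM.
   */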
  1864. for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
  1865. ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
  1866. if (ret_val) {
  1867. hw_dbg("NVM Read Error while updating checksum.\n");
  1868. goto out;
  1869. }
  1870. checksum += nvm_data;
  1871. }
  1872. checksum = (u16) NVM_SUM - checksum;
  1873. ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
  1874. &checksum);
  1875. if (ret_val)
  1876. hw_dbg("NVM Write Error while updating checksum.\n");
  1877. out:
  1878. return ret_val;
  1879. }
  1880. /**
  1881. * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
  1882. * @hw: pointer to the HW structure
  1883. *
  1884. * Calculates the EEPROM section checksum by reading/adding each word of
  1885. * the EEPROM and then verifies that the sum of the EEPROM is
  1886. * equal to 0xBABA.
  1887. **/
  1888. static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
  1889. {
  1890. s32 ret_val = 0;
  1891. u16 eeprom_regions_count = 1;
  1892. u16 j, nvm_data;
  1893. u16 nvm_offset;
  1894. ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
  1895. if (ret_val) {
  1896. hw_dbg("NVM Read Error\n");
  1897. goto out;
  1898. }
  1899. if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
  1900. /* if checksums compatibility bit is set validate checksums
  1901. * for all 4 ports.
  1902. */
  1903. eeprom_regions_count = 4;
  1904. }
  1905. for (j = 0; j < eeprom_regions_count; j++) {
  1906. nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
  1907. ret_val = igb_validate_nvm_checksum_with_offset(hw,
  1908. nvm_offset);
  1909. if (ret_val != 0)
  1910. goto out;
  1911. }
  1912. out:
  1913. return ret_val;
  1914. }
  1915. /**
  1916. * igb_update_nvm_checksum_82580 - Update EEPROM checksum
  1917. * @hw: pointer to the HW structure
  1918. *
  1919. * Updates the EEPROM section checksums for all 4 ports by reading/adding
  1920. * each word of the EEPROM up to the checksum. Then calculates the EEPROM
  1921. * checksum and writes the value to the EEPROM.
  1922. **/
  1923. static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
  1924. {
  1925. s32 ret_val;
  1926. u16 j, nvm_data;
  1927. u16 nvm_offset;
  1928. ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
  1929. if (ret_val) {
  1930. hw_dbg("NVM Read Error while updating checksum"
  1931. " compatibility bit.\n");
  1932. goto out;
  1933. }
  1934. if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
  1935. /* set compatibility bit to validate checksums appropriately */
  1936. nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
  1937. ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
  1938. &nvm_data);
  1939. if (ret_val) {
  1940. hw_dbg("NVM Write Error while updating checksum"
  1941. " compatibility bit.\n");
  1942. goto out;
  1943. }
  1944. }
  1945. for (j = 0; j < 4; j++) {
  1946. nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
  1947. ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
  1948. if (ret_val)
  1949. goto out;
  1950. }
  1951. out:
  1952. return ret_val;
  1953. }
  1954. /**
  1955. * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
  1956. * @hw: pointer to the HW structure
  1957. *
  1958. * Calculates the EEPROM section checksum by reading/adding each word of
  1959. * the EEPROM and then verifies that the sum of the EEPROM is
  1960. * equal to 0xBABA.
  1961. **/
  1962. static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
  1963. {
  1964. s32 ret_val = 0;
  1965. u16 j;
  1966. u16 nvm_offset;
  1967. for (j = 0; j < 4; j++) {
  1968. nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
  1969. ret_val = igb_validate_nvm_checksum_with_offset(hw,
  1970. nvm_offset);
  1971. if (ret_val != 0)
  1972. goto out;
  1973. }
  1974. out:
  1975. return ret_val;
  1976. }
  1977. /**
  1978. * igb_update_nvm_checksum_i350 - Update EEPROM checksum
  1979. * @hw: pointer to the HW structure
  1980. *
  1981. * Updates the EEPROM section checksums for all 4 ports by reading/adding
  1982. * each word of the EEPROM up to the checksum. Then calculates the EEPROM
  1983. * checksum and writes the value to the EEPROM.
  1984. **/
  1985. static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
  1986. {
  1987. s32 ret_val = 0;
  1988. u16 j;
  1989. u16 nvm_offset;
  1990. for (j = 0; j < 4; j++) {
  1991. nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
  1992. ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
  1993. if (ret_val != 0)
  1994. goto out;
  1995. }
  1996. out:
  1997. return ret_val;
  1998. }
  1999. /**
  2000. * __igb_access_emi_reg - Read/write EMI register
  2001. * @hw: pointer to the HW structure
  2002. * @address: EMI address to program
  2003. * @data: pointer to value to read/write from/to the EMI address
  2004. * @read: boolean flag to indicate read or write
  2005. **/
  2006. static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
  2007. u16 *data, bool read)
  2008. {
  2009. s32 ret_val = E1000_SUCCESS;
  2010. ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
  2011. if (ret_val)
  2012. return ret_val;
  2013. if (read)
  2014. ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
  2015. else
  2016. ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
  2017. return ret_val;
  2018. }
  2019. /**
  2020. * igb_read_emi_reg - Read Extended Management Interface register
  2021. * @hw: pointer to the HW structure
  2022. * @addr: EMI address to program
  2023. * @data: pointer to storage for the value read from the EMI address
  2024. **/
  2025. s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
  2026. {
  2027. return __igb_access_emi_reg(hw, addr, data, true);
  2028. }
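  /* Usage sketch for callers (illustrative only - the EMI address 0x10 and
   * the error handling shown are assumptions, not taken from this driver):
   *
   *	u16 val;
   *	s32 err = igb_read_emi_reg(hw, 0x10, &val);
   *	if (err)
   *		return err;
   */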
  2029. /**
  2030. * igb_set_eee_i350 - Enable/disable EEE support
  2031. * @hw: pointer to the HW structure
  2032. *
  2033. * Enable/disable EEE based on setting in dev_spec structure.
  2034. *
  2035. **/
  2036. s32 igb_set_eee_i350(struct e1000_hw *hw)
  2037. {
  2038. s32 ret_val = 0;
  2039. u32 ipcnfg, eeer;
  2040. if ((hw->mac.type < e1000_i350) ||
  2041. (hw->phy.media_type != e1000_media_type_copper))
  2042. goto out;
  2043. ipcnfg = rd32(E1000_IPCNFG);
  2044. eeer = rd32(E1000_EEER);
  2045. /* enable or disable per user setting */
  2046. if (!(hw->dev_spec._82575.eee_disable)) {
  2047. u32 eee_su = rd32(E1000_EEE_SU);
  2048. ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
  2049. eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
  2050. E1000_EEER_LPI_FC);
  2051. /* This bit should not be set in normal operation. */
  2052. if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
  2053. hw_dbg("LPI Clock Stop Bit should not be set!\n");
  2054. } else {
  2055. ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
  2056. E1000_IPCNFG_EEE_100M_AN);
  2057. eeer &= ~(E1000_EEER_TX_LPI_EN |
  2058. E1000_EEER_RX_LPI_EN |
  2059. E1000_EEER_LPI_FC);
  2060. }
  2061. wr32(E1000_IPCNFG, ipcnfg);
  2062. wr32(E1000_EEER, eeer);
  2063. rd32(E1000_IPCNFG);
  2064. rd32(E1000_EEER);
  2065. out:
  2066. return ret_val;
  2067. }
  2068. /**
  2069. * igb_set_eee_i354 - Enable/disable EEE support
  2070. * @hw: pointer to the HW structure
  2071. *
  2072. * Enable/disable EEE legacy mode based on setting in dev_spec structure.
  2073. *
  2074. **/
  2075. s32 igb_set_eee_i354(struct e1000_hw *hw)
  2076. {
  2077. struct e1000_phy_info *phy = &hw->phy;
  2078. s32 ret_val = 0;
  2079. u16 phy_data;
  2080. if ((hw->phy.media_type != e1000_media_type_copper) ||
  2081. (phy->id != M88E1545_E_PHY_ID))
  2082. goto out;
  2083. if (!hw->dev_spec._82575.eee_disable) {
  2084. /* Switch to PHY page 18. */
  2085. ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
  2086. if (ret_val)
  2087. goto out;
  2088. ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
  2089. &phy_data);
  2090. if (ret_val)
  2091. goto out;
  2092. phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
  2093. ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
  2094. phy_data);
  2095. if (ret_val)
  2096. goto out;
  2097. /* Return the PHY to page 0. */
  2098. ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
  2099. if (ret_val)
  2100. goto out;
  2101. /* Turn on EEE advertisement. */
  2102. ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
  2103. E1000_EEE_ADV_DEV_I354,
  2104. &phy_data);
  2105. if (ret_val)
  2106. goto out;
  2107. phy_data |= E1000_EEE_ADV_100_SUPPORTED |
  2108. E1000_EEE_ADV_1000_SUPPORTED;
  2109. ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
  2110. E1000_EEE_ADV_DEV_I354,
  2111. phy_data);
  2112. } else {
  2113. /* Turn off EEE advertisement. */
  2114. ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
  2115. E1000_EEE_ADV_DEV_I354,
  2116. &phy_data);
  2117. if (ret_val)
  2118. goto out;
  2119. phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
  2120. E1000_EEE_ADV_1000_SUPPORTED);
  2121. ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
  2122. E1000_EEE_ADV_DEV_I354,
  2123. phy_data);
  2124. }
  2125. out:
  2126. return ret_val;
  2127. }
  2128. /**
  2129. * igb_get_eee_status_i354 - Get EEE status
  2130. * @hw: pointer to the HW structure
  2131. * @status: EEE status
  2132. *
  2133. * Get EEE status by guessing based on whether Tx or Rx LPI indications have
  2134. * been received.
  2135. **/
  2136. s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
  2137. {
  2138. struct e1000_phy_info *phy = &hw->phy;
  2139. s32 ret_val = 0;
  2140. u16 phy_data;
  2141. /* Check if EEE is supported on this device. */
  2142. if ((hw->phy.media_type != e1000_media_type_copper) ||
  2143. (phy->id != M88E1545_E_PHY_ID))
  2144. goto out;
  2145. ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
  2146. E1000_PCS_STATUS_DEV_I354,
  2147. &phy_data);
  2148. if (ret_val)
  2149. goto out;
  2150. *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
  2151. E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
  2152. out:
  2153. return ret_val;
  2154. }
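  /* EMC register addresses used by the thermal sensor code below, indexed
   * by the sensor index decoded from the NVM ETS words: one entry for the
   * internal diode and one for each of the three external diodes.
   */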
  2155. static const u8 e1000_emc_temp_data[4] = {
  2156. E1000_EMC_INTERNAL_DATA,
  2157. E1000_EMC_DIODE1_DATA,
  2158. E1000_EMC_DIODE2_DATA,
  2159. E1000_EMC_DIODE3_DATA
  2160. };
  2161. static const u8 e1000_emc_therm_limit[4] = {
  2162. E1000_EMC_INTERNAL_THERM_LIMIT,
  2163. E1000_EMC_DIODE1_THERM_LIMIT,
  2164. E1000_EMC_DIODE2_THERM_LIMIT,
  2165. E1000_EMC_DIODE3_THERM_LIMIT
  2166. };
  2167. /**
  2168. * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
  2169. * @hw: pointer to hardware structure
  2170. *
  2171. * Updates the temperatures in mac.thermal_sensor_data
  2172. **/
  2173. s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
  2174. {
  2175. s32 status = E1000_SUCCESS;
  2176. u16 ets_offset;
  2177. u16 ets_cfg;
  2178. u16 ets_sensor;
  2179. u8 num_sensors;
  2180. u8 sensor_index;
  2181. u8 sensor_location;
  2182. u8 i;
  2183. struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
  2184. if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
  2185. return E1000_NOT_IMPLEMENTED;
  2186. data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
  2187. /* Return the internal sensor only if ETS is unsupported */
  2188. hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
  2189. if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
  2190. return status;
  2191. hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
  2192. if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
  2193. != NVM_ETS_TYPE_EMC)
  2194. return E1000_NOT_IMPLEMENTED;
  2195. num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
  2196. if (num_sensors > E1000_MAX_SENSORS)
  2197. num_sensors = E1000_MAX_SENSORS;
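  /* Each remaining ETS word selects which EMC data register to read
   * (sensor index) and records the diode location; a location of 0 means
   * the sensor is not populated, so its temperature is not read.
   */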
  2198. for (i = 1; i < num_sensors; i++) {
  2199. hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
  2200. sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
  2201. NVM_ETS_DATA_INDEX_SHIFT);
  2202. sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
  2203. NVM_ETS_DATA_LOC_SHIFT);
  2204. if (sensor_location != 0)
  2205. hw->phy.ops.read_i2c_byte(hw,
  2206. e1000_emc_temp_data[sensor_index],
  2207. E1000_I2C_THERMAL_SENSOR_ADDR,
  2208. &data->sensor[i].temp);
  2209. }
  2210. return status;
  2211. }
  2212. /**
  2213. * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
  2214. * @hw: pointer to hardware structure
  2215. *
  2216. * Sets the thermal sensor thresholds according to the NVM map
  2217. * and save off the threshold and location values into mac.thermal_sensor_data
  2218. **/
  2219. s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
  2220. {
  2221. s32 status = E1000_SUCCESS;
  2222. u16 ets_offset;
  2223. u16 ets_cfg;
  2224. u16 ets_sensor;
  2225. u8 low_thresh_delta;
  2226. u8 num_sensors;
  2227. u8 sensor_index;
  2228. u8 sensor_location;
  2229. u8 therm_limit;
  2230. u8 i;
  2231. struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
  2232. if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
  2233. return E1000_NOT_IMPLEMENTED;
  2234. memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
  2235. data->sensor[0].location = 0x1;
  2236. data->sensor[0].caution_thresh =
  2237. (rd32(E1000_THHIGHTC) & 0xFF);
  2238. data->sensor[0].max_op_thresh =
  2239. (rd32(E1000_THLOWTC) & 0xFF);
  2240. /* Return the internal sensor only if ETS is unsupported */
  2241. hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
  2242. if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
  2243. return status;
  2244. hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
  2245. if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
  2246. != NVM_ETS_TYPE_EMC)
  2247. return E1000_NOT_IMPLEMENTED;
  2248. low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
  2249. NVM_ETS_LTHRES_DELTA_SHIFT);
  2250. num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
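  /* For each external sensor, program its high threshold into the EMC over
   * I2C and record the location, caution threshold, and max operating
   * threshold (high threshold minus the ETS low-threshold delta) locally.
   */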
  2251. for (i = 1; i <= num_sensors; i++) {
  2252. hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
  2253. sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
  2254. NVM_ETS_DATA_INDEX_SHIFT);
  2255. sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
  2256. NVM_ETS_DATA_LOC_SHIFT);
  2257. therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
  2258. hw->phy.ops.write_i2c_byte(hw,
  2259. e1000_emc_therm_limit[sensor_index],
  2260. E1000_I2C_THERMAL_SENSOR_ADDR,
  2261. therm_limit);
  2262. if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
  2263. data->sensor[i].location = sensor_location;
  2264. data->sensor[i].caution_thresh = therm_limit;
  2265. data->sensor[i].max_op_thresh = therm_limit -
  2266. low_thresh_delta;
  2267. }
  2268. }
  2269. return status;
  2270. }
  2271. static struct e1000_mac_operations e1000_mac_ops_82575 = {
  2272. .init_hw = igb_init_hw_82575,
  2273. .check_for_link = igb_check_for_link_82575,
  2274. .rar_set = igb_rar_set,
  2275. .read_mac_addr = igb_read_mac_addr_82575,
  2276. .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
  2277. #ifdef CONFIG_IGB_HWMON
  2278. .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
  2279. .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
  2280. #endif
  2281. };
  2282. static struct e1000_phy_operations e1000_phy_ops_82575 = {
  2283. .acquire = igb_acquire_phy_82575,
  2284. .get_cfg_done = igb_get_cfg_done_82575,
  2285. .release = igb_release_phy_82575,
  2286. .write_i2c_byte = igb_write_i2c_byte,
  2287. .read_i2c_byte = igb_read_i2c_byte,
  2288. };
  2289. static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
  2290. .acquire = igb_acquire_nvm_82575,
  2291. .read = igb_read_nvm_eerd,
  2292. .release = igb_release_nvm_82575,
  2293. .write = igb_write_nvm_spi,
  2294. };
  2295. const struct e1000_info e1000_82575_info = {
  2296. .get_invariants = igb_get_invariants_82575,
  2297. .mac_ops = &e1000_mac_ops_82575,
  2298. .phy_ops = &e1000_phy_ops_82575,
  2299. .nvm_ops = &e1000_nvm_ops_82575,
  2300. };