mv643xx_eth.c

/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *                    Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *      written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *                         Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *                                   <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *                         Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR                        0x0000
#define SMI_REG                         0x0004
#define  SMI_BUSY                       0x10000000
#define  SMI_READ_VALID                 0x08000000
#define  SMI_OPCODE_READ                0x04000000
#define  SMI_OPCODE_WRITE               0x00000000
#define ERR_INT_CAUSE                   0x0080
#define  ERR_INT_SMI_DONE               0x00000010
#define ERR_INT_MASK                    0x0084
#define WINDOW_BASE(w)                  (0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)                  (0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)            (0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE               0x0290
#define WINDOW_PROTECT(w)               (0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
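/*
 * For example, port #1's PORT_STATUS register would sit at
 * 0x0800 + 0x0044 from the shared register base.  The rdlp()/wrlp()
 * accessors further down bake the per-port base into mp->base, so
 * callers only pass the offsets defined here.
 */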
#define PORT_CONFIG                     0x0000
#define  UNICAST_PROMISCUOUS_MODE       0x00000001
#define PORT_CONFIG_EXT                 0x0004
#define MAC_ADDR_LOW                    0x0014
#define MAC_ADDR_HIGH                   0x0018
#define SDMA_CONFIG                     0x001c
#define PORT_SERIAL_CONTROL             0x003c
#define PORT_STATUS                     0x0044
#define  TX_FIFO_EMPTY                  0x00000400
#define  TX_IN_PROGRESS                 0x00000080
#define  PORT_SPEED_MASK                0x00000030
#define  PORT_SPEED_1000                0x00000010
#define  PORT_SPEED_100                 0x00000020
#define  PORT_SPEED_10                  0x00000000
#define  FLOW_CONTROL_ENABLED           0x00000008
#define  FULL_DUPLEX                    0x00000004
#define  LINK_UP                        0x00000002
#define TXQ_COMMAND                     0x0048
#define TXQ_FIX_PRIO_CONF               0x004c
#define TX_BW_RATE                      0x0050
#define TX_BW_MTU                       0x0058
#define TX_BW_BURST                     0x005c
#define INT_CAUSE                       0x0060
#define  INT_TX_END                     0x07f80000
#define  INT_RX                         0x000003fc
#define  INT_EXT                        0x00000002
#define INT_CAUSE_EXT                   0x0064
#define  INT_EXT_LINK_PHY               0x00110000
#define  INT_EXT_TX                     0x000000ff
#define INT_MASK                        0x0068
#define INT_MASK_EXT                    0x006c
#define TX_FIFO_URGENT_THRESHOLD        0x0074
#define TXQ_FIX_PRIO_CONF_MOVED         0x00dc
#define TX_BW_RATE_MOVED                0x00e0
#define TX_BW_MTU_MOVED                 0x00e8
#define TX_BW_BURST_MOVED               0x00ec
#define RXQ_CURRENT_DESC_PTR(q)         (0x020c + ((q) << 4))
#define RXQ_COMMAND                     0x0280
#define TXQ_CURRENT_DESC_PTR(q)         (0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)                (0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)                  (0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)              (0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)                 (0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)          (0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)            (0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)                (0x1600 + ((p) << 10))

/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT           (2 << 1)
#define RX_BURST_SIZE_16_64BIT          (4 << 1)
#define BLM_RX_NO_SWAP                  (1 << 4)
#define BLM_TX_NO_SWAP                  (1 << 5)
#define TX_BURST_SIZE_4_64BIT           (2 << 22)
#define TX_BURST_SIZE_16_64BIT          (4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE          \
                (RX_BURST_SIZE_4_64BIT  |       \
                 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE          \
                (RX_BURST_SIZE_4_64BIT  |       \
                 BLM_RX_NO_SWAP         |       \
                 BLM_TX_NO_SWAP         |       \
                 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
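/*
 * The BLM_*_NO_SWAP bits control the SDMA engine's byte lane
 * swapping, which is active by default: little-endian builds set
 * both NO_SWAP bits to turn the swapping off, while big-endian
 * builds leave it enabled.
 */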
/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100            (1 << 24)
#define SET_GMII_SPEED_TO_1000          (1 << 23)
#define SET_FULL_DUPLEX_MODE            (1 << 21)
#define MAX_RX_PACKET_9700BYTE          (5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII     (1 << 13)
#define DO_NOT_FORCE_LINK_FAIL          (1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED    (1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL  (1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX     (1 << 2)
#define FORCE_LINK_PASS                 (1 << 1)
#define SERIAL_PORT_ENABLE              (1 << 0)

#define DEFAULT_RX_QUEUE_SIZE           128
#define DEFAULT_TX_QUEUE_SIZE           256

/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
        u16 byte_cnt;           /* Descriptor buffer byte count         */
        u16 buf_size;           /* Buffer size                          */
        u32 cmd_sts;            /* Descriptor command status            */
        u32 next_desc_ptr;      /* Next descriptor pointer              */
        u32 buf_ptr;            /* Descriptor buffer pointer            */
};

struct tx_desc {
        u16 byte_cnt;           /* buffer byte count                    */
        u16 l4i_chk;            /* CPU provided TCP checksum            */
        u32 cmd_sts;            /* Command/status field                 */
        u32 next_desc_ptr;      /* Pointer to next descriptor           */
        u32 buf_ptr;            /* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
        u32 cmd_sts;            /* Descriptor command status            */
        u16 buf_size;           /* Buffer size                          */
        u16 byte_cnt;           /* Descriptor buffer byte count         */
        u32 buf_ptr;            /* Descriptor buffer pointer            */
        u32 next_desc_ptr;      /* Next descriptor pointer              */
};

struct tx_desc {
        u32 cmd_sts;            /* Command/status field                 */
        u16 l4i_chk;            /* CPU provided TCP checksum            */
        u16 byte_cnt;           /* buffer byte count                    */
        u32 buf_ptr;            /* pointer to buffer for this descriptor */
        u32 next_desc_ptr;      /* Pointer to next descriptor           */
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
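/*
 * Both layouts above describe the same hardware descriptor: the
 * controller sees a descriptor as two 64-bit words, so the field
 * order within each word is reversed between big- and little-endian
 * builds to keep the in-memory layout the hardware expects.
 */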
/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA             0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY                   0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK             0x40000000
#define RX_ENABLE_INTERRUPT             0x20000000
#define RX_FIRST_DESC                   0x08000000
#define RX_LAST_DESC                    0x04000000
#define RX_IP_HDR_OK                    0x02000000
#define RX_PKT_IS_IPV4                  0x01000000
#define RX_PKT_IS_ETHERNETV2            0x00800000
#define RX_PKT_LAYER4_TYPE_MASK         0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4     0x00000000
#define RX_PKT_IS_VLAN_TAGGED           0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT             0x00800000
#define GEN_CRC                         0x00400000
#define TX_FIRST_DESC                   0x00200000
#define TX_LAST_DESC                    0x00100000
#define ZERO_PADDING                    0x00080000
#define GEN_IP_V4_CHECKSUM              0x00040000
#define GEN_TCP_UDP_CHECKSUM            0x00020000
#define UDP_FRAME                       0x00010000
#define MAC_HDR_EXTRA_4_BYTES           0x00008000
#define MAC_HDR_EXTRA_8_BYTES           0x00000200

#define TX_IHL_SHIFT                    11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
        /*
         * Ethernet controller base address.
         */
        void __iomem *base;

        /*
         * Points at the right SMI instance to use.
         */
        struct mv643xx_eth_shared_private *smi;

        /*
         * Provides access to local SMI interface.
         */
        struct mii_bus *smi_bus;

        /*
         * If we have access to the error interrupt pin (which is
         * somewhat misnamed as it not only reflects internal errors
         * but also reflects SMI completion), use that to wait for
         * SMI access completion instead of polling the SMI busy bit.
         */
        int err_interrupt;
        wait_queue_head_t smi_busy_wait;

        /*
         * Per-port MBUS window access register value.
         */
        u32 win_protect;

        /*
         * Hardware-specific parameters.
         */
        unsigned int t_clk;
        int extended_rx_coal_limit;
        int tx_bw_control;
};

#define TX_BW_CONTROL_ABSENT            0
#define TX_BW_CONTROL_OLD_LAYOUT        1
#define TX_BW_CONTROL_NEW_LAYOUT        2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
        u64 good_octets_received;
        u32 bad_octets_received;
        u32 internal_mac_transmit_err;
        u32 good_frames_received;
        u32 bad_frames_received;
        u32 broadcast_frames_received;
        u32 multicast_frames_received;
        u32 frames_64_octets;
        u32 frames_65_to_127_octets;
        u32 frames_128_to_255_octets;
        u32 frames_256_to_511_octets;
        u32 frames_512_to_1023_octets;
        u32 frames_1024_to_max_octets;
        u64 good_octets_sent;
        u32 good_frames_sent;
        u32 excessive_collision;
        u32 multicast_frames_sent;
        u32 broadcast_frames_sent;
        u32 unrec_mac_control_received;
        u32 fc_sent;
        u32 good_fc_received;
        u32 bad_fc_received;
        u32 undersize_received;
        u32 fragments_received;
        u32 oversize_received;
        u32 jabber_received;
        u32 mac_receive_error;
        u32 bad_crc_event;
        u32 collision;
        u32 late_collision;
};

struct lro_counters {
        u32 lro_aggregated;
        u32 lro_flushed;
        u32 lro_no_desc;
};

struct rx_queue {
        int index;

        int rx_ring_size;

        int rx_desc_count;
        int rx_curr_desc;
        int rx_used_desc;

        struct rx_desc *rx_desc_area;
        dma_addr_t rx_desc_dma;
        int rx_desc_area_size;
        struct sk_buff **rx_skb;

        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_arr[8];
};

struct tx_queue {
        int index;

        int tx_ring_size;

        int tx_desc_count;
        int tx_curr_desc;
        int tx_used_desc;

        struct tx_desc *tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;

        struct sk_buff_head tx_skb;

        unsigned long tx_packets;
        unsigned long tx_bytes;
        unsigned long tx_dropped;
};

struct mv643xx_eth_private {
        struct mv643xx_eth_shared_private *shared;
        void __iomem *base;
        int port_num;

        struct net_device *dev;

        struct phy_device *phy;

        struct timer_list mib_counters_timer;
        spinlock_t mib_counters_lock;
        struct mib_counters mib_counters;

        struct lro_counters lro_counters;

        struct work_struct tx_timeout_task;

        struct napi_struct napi;
        u8 work_link;
        u8 work_tx;
        u8 work_tx_end;
        u8 work_rx;
        u8 work_rx_refill;
        u8 work_rx_oom;

        int skb_size;
        struct sk_buff_head rx_recycle;

        /*
         * RX state.
         */
        int rx_ring_size;
        unsigned long rx_desc_sram_addr;
        int rx_desc_sram_size;
        int rxq_count;
        struct timer_list rx_oom;
        struct rx_queue rxq[8];

        /*
         * TX state.
         */
        int tx_ring_size;
        unsigned long tx_desc_sram_addr;
        int tx_desc_sram_size;
        int txq_count;
        struct tx_queue txq[8];
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
        return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
        return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
        writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
        writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
        return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
        return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);

        wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        u8 mask = 1 << rxq->index;

        wrlp(mp, RXQ_COMMAND, mask << 8);
        while (rdlp(mp, RXQ_COMMAND) & mask)
                udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        u32 addr;

        addr = (u32)txq->tx_desc_dma;
        addr += txq->tx_curr_desc * sizeof(struct tx_desc);
        wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);

        wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        u8 mask = 1 << txq->index;

        wrlp(mp, TXQ_COMMAND, mask << 8);
        while (rdlp(mp, TXQ_COMMAND) & mask)
                udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

        if (netif_tx_queue_stopped(nq)) {
                __netif_tx_lock(nq, smp_processor_id());
                if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
                        netif_tx_wake_queue(nq);
                __netif_tx_unlock(nq);
        }
}


/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
                       u64 *hdr_flags, void *priv)
{
        unsigned long cmd_sts = (unsigned long)priv;

        /*
         * Make sure that this packet is Ethernet II, is not VLAN
         * tagged, is IPv4, has a valid IP header, and is TCP.
         */
        if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
                        RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
                        RX_PKT_IS_VLAN_TAGGED)) !=
            (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
             RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
                return -1;

        skb_reset_network_header(skb);
        skb_set_transport_header(skb, ip_hdrlen(skb));
        *iphdr = ip_hdr(skb);
        *tcph = tcp_hdr(skb);
        *hdr_flags = LRO_IPV4 | LRO_TCP;

        return 0;
}
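/*
 * Descriptor ownership handoff: the CPU only touches a descriptor
 * once the DMA engine has cleared BUFFER_OWNED_BY_DMA in cmd_sts,
 * and the rmb() in rxq_process() below orders that ownership check
 * before any reads of the descriptor's other fields.
 */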
static int rxq_process(struct rx_queue *rxq, int budget)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        struct net_device_stats *stats = &mp->dev->stats;
        int lro_flush_needed;
        int rx;

        lro_flush_needed = 0;
        rx = 0;
        while (rx < budget && rxq->rx_desc_count) {
                struct rx_desc *rx_desc;
                unsigned int cmd_sts;
                struct sk_buff *skb;
                u16 byte_cnt;

                rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

                cmd_sts = rx_desc->cmd_sts;
                if (cmd_sts & BUFFER_OWNED_BY_DMA)
                        break;
                rmb();

                skb = rxq->rx_skb[rxq->rx_curr_desc];
                rxq->rx_skb[rxq->rx_curr_desc] = NULL;

                rxq->rx_curr_desc++;
                if (rxq->rx_curr_desc == rxq->rx_ring_size)
                        rxq->rx_curr_desc = 0;

                dma_unmap_single(NULL, rx_desc->buf_ptr,
                                 rx_desc->buf_size, DMA_FROM_DEVICE);
                rxq->rx_desc_count--;
                rx++;

                mp->work_rx_refill |= 1 << rxq->index;

                byte_cnt = rx_desc->byte_cnt;

                /*
                 * Update statistics.
                 *
                 * Note that the descriptor byte count includes 2 dummy
                 * bytes automatically inserted by the hardware at the
                 * start of the packet (which we don't count), and a 4
                 * byte CRC at the end of the packet (which we do count).
                 */
                stats->rx_packets++;
                stats->rx_bytes += byte_cnt - 2;

                /*
                 * In case we received a packet without first / last bits
                 * on, or the error summary bit is set, the packet needs
                 * to be dropped.
                 */
                if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
                        != (RX_FIRST_DESC | RX_LAST_DESC))
                        goto err;

                /*
                 * The -4 is for the CRC in the trailer of the
                 * received packet
                 */
                skb_put(skb, byte_cnt - 2 - 4);

                if (cmd_sts & LAYER_4_CHECKSUM_OK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->protocol = eth_type_trans(skb, mp->dev);

                if (skb->dev->features & NETIF_F_LRO &&
                    skb->ip_summed == CHECKSUM_UNNECESSARY) {
                        lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
                        lro_flush_needed = 1;
                } else
                        netif_receive_skb(skb);

                continue;

err:
                stats->rx_dropped++;

                if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                        (RX_FIRST_DESC | RX_LAST_DESC)) {
                        if (net_ratelimit())
                                dev_printk(KERN_ERR, &mp->dev->dev,
                                           "received packet spanning "
                                           "multiple descriptors\n");
                }

                if (cmd_sts & ERROR_SUMMARY)
                        stats->rx_errors++;

                dev_kfree_skb(skb);
        }

        if (lro_flush_needed)
                lro_flush_all(&rxq->lro_mgr);

        if (rx < budget)
                mp->work_rx &= ~(1 << rxq->index);

        return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        int refilled;

        refilled = 0;
        while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
                struct sk_buff *skb;
                int unaligned;
                int rx;
                struct rx_desc *rx_desc;

                skb = __skb_dequeue(&mp->rx_recycle);
                if (skb == NULL)
                        skb = dev_alloc_skb(mp->skb_size +
                                            dma_get_cache_alignment() - 1);

                if (skb == NULL) {
                        mp->work_rx_oom |= 1 << rxq->index;
                        goto oom;
                }

                unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
                if (unaligned)
                        skb_reserve(skb, dma_get_cache_alignment() - unaligned);

                refilled++;
                rxq->rx_desc_count++;

                rx = rxq->rx_used_desc++;
                if (rxq->rx_used_desc == rxq->rx_ring_size)
                        rxq->rx_used_desc = 0;

                rx_desc = rxq->rx_desc_area + rx;

                rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
                                                  mp->skb_size, DMA_FROM_DEVICE);
                rx_desc->buf_size = mp->skb_size;
                rxq->rx_skb[rx] = skb;
                wmb();
                rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
                wmb();

                /*
                 * The hardware automatically prepends 2 bytes of
                 * dummy data to each received packet, so that the
                 * IP header ends up 16-byte aligned.
                 */
                skb_reserve(skb, 2);
        }

        if (refilled < budget)
                mp->work_rx_refill &= ~(1 << rxq->index);

oom:
        return refilled;
}


/* tx ***********************************************************************/
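/*
 * The hardware appears unable to DMA fragments that are both smaller
 * than 8 bytes and not 8-byte aligned; skbs containing such fragments
 * are linearized in mv643xx_eth_xmit() before submission.
 */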
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
        int frag;

        for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

                if (fragp->size <= 8 && fragp->page_offset & 7)
                        return 1;
        }

        return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int frag;

        for (frag = 0; frag < nr_frags; frag++) {
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;

                this_frag = &skb_shinfo(skb)->frags[frag];
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
                desc = &txq->tx_desc_area[tx_index];

                /*
                 * The last fragment will generate an interrupt
                 * which will free the skb on TX completion.
                 */
                if (frag == nr_frags - 1) {
                        desc->cmd_sts = BUFFER_OWNED_BY_DMA |
                                        ZERO_PADDING | TX_LAST_DESC |
                                        TX_ENABLE_INTERRUPT;
                } else {
                        desc->cmd_sts = BUFFER_OWNED_BY_DMA;
                }

                desc->l4i_chk = 0;
                desc->byte_cnt = this_frag->size;
                desc->buf_ptr = dma_map_page(NULL, this_frag->page,
                                             this_frag->page_offset,
                                             this_frag->size,
                                             DMA_TO_DEVICE);
        }
}
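/*
 * For CHECKSUM_PARTIAL skbs the stack leaves the pseudo-header sum
 * in the L4 checksum field as a big-endian __sum16; sum16_as_be()
 * below just relabels those bits so that ntohs() can convert them
 * to the CPU-order value the l4i_chk descriptor field expects.
 */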
static inline __be16 sum16_as_be(__sum16 sum)
{
        return (__force __be16)sum;
}
static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int tx_index;
        struct tx_desc *desc;
        u32 cmd_sts;
        u16 l4i_chk;
        int length;

        cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
        l4i_chk = 0;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int tag_bytes;

                BUG_ON(skb->protocol != htons(ETH_P_IP) &&
                       skb->protocol != htons(ETH_P_8021Q));

                tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
                if (unlikely(tag_bytes & ~12)) {
                        if (skb_checksum_help(skb) == 0)
                                goto no_csum;
                        kfree_skb(skb);
                        return 1;
                }

                if (tag_bytes & 4)
                        cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
                if (tag_bytes & 8)
                        cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

                cmd_sts |= GEN_TCP_UDP_CHECKSUM |
                           GEN_IP_V4_CHECKSUM |
                           ip_hdr(skb)->ihl << TX_IHL_SHIFT;

                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        cmd_sts |= UDP_FRAME;
                        l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
                        break;
                case IPPROTO_TCP:
                        l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
                        break;
                default:
                        BUG();
                }
        } else {
no_csum:
                /* Errata BTS #50, IHL must be 5 if no HW checksum */
                cmd_sts |= 5 << TX_IHL_SHIFT;
        }

        tx_index = txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];

        if (nr_frags) {
                txq_submit_frag_skb(txq, skb);
                length = skb_headlen(skb);
        } else {
                cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
                length = skb->len;
        }

        desc->l4i_chk = l4i_chk;
        desc->byte_cnt = length;
        desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

        __skb_queue_tail(&txq->tx_skb, skb);

        /* ensure all other descriptors are written before first cmd_sts */
        wmb();
        desc->cmd_sts = cmd_sts;

        /* clear TX_END status */
        mp->work_tx_end &= ~(1 << txq->index);

        /* ensure all descriptors are written before poking hardware */
        wmb();
        txq_enable(txq);

        txq->tx_desc_count += nr_frags + 1;

        return 0;
}

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int queue;
        struct tx_queue *txq;
        struct netdev_queue *nq;

        queue = skb_get_queue_mapping(skb);
        txq = mp->txq + queue;
        nq = netdev_get_tx_queue(dev, queue);

        if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
                txq->tx_dropped++;
                dev_printk(KERN_DEBUG, &dev->dev,
                           "failed to linearize skb with tiny "
                           "unaligned fragment\n");
                return NETDEV_TX_BUSY;
        }

        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
                if (net_ratelimit())
                        dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (!txq_submit_skb(txq, skb)) {
                int entries_left;

                txq->tx_bytes += skb->len;
                txq->tx_packets++;
                dev->trans_start = jiffies;

                entries_left = txq->tx_ring_size - txq->tx_desc_count;
                if (entries_left < MAX_SKB_FRAGS + 1)
                        netif_tx_stop_queue(nq);
        }

        return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
        u32 hw_desc_ptr;
        u32 expected_ptr;

        __netif_tx_lock(nq, smp_processor_id());

        if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
                goto out;

        hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
        expected_ptr = (u32)txq->tx_desc_dma +
                                txq->tx_curr_desc * sizeof(struct tx_desc);

        if (hw_desc_ptr != expected_ptr)
                txq_enable(txq);

out:
        __netif_tx_unlock(nq);

        mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
        int reclaimed;

        __netif_tx_lock(nq, smp_processor_id());

        reclaimed = 0;
        while (reclaimed < budget && txq->tx_desc_count > 0) {
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
                struct sk_buff *skb;

                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
                cmd_sts = desc->cmd_sts;

                if (cmd_sts & BUFFER_OWNED_BY_DMA) {
                        if (!force)
                                break;
                        desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
                }

                txq->tx_used_desc = tx_index + 1;
                if (txq->tx_used_desc == txq->tx_ring_size)
                        txq->tx_used_desc = 0;

                reclaimed++;
                txq->tx_desc_count--;

                skb = NULL;
                if (cmd_sts & TX_LAST_DESC)
                        skb = __skb_dequeue(&txq->tx_skb);

                if (cmd_sts & ERROR_SUMMARY) {
                        dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
                        mp->dev->stats.tx_errors++;
                }

                if (cmd_sts & TX_FIRST_DESC) {
                        dma_unmap_single(NULL, desc->buf_ptr,
                                         desc->byte_cnt, DMA_TO_DEVICE);
                } else {
                        dma_unmap_page(NULL, desc->buf_ptr,
                                       desc->byte_cnt, DMA_TO_DEVICE);
                }

                if (skb != NULL) {
                        if (skb_queue_len(&mp->rx_recycle) <
                                        mp->rx_ring_size &&
                            skb_recycle_check(skb, mp->skb_size +
                                        dma_get_cache_alignment() - 1))
                                __skb_queue_head(&mp->rx_recycle, skb);
                        else
                                dev_kfree_skb(skb);
                }
        }

        __netif_tx_unlock(nq);

        if (reclaimed < budget)
                mp->work_tx &= ~(1 << txq->index);

        return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
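/*
 * As a worked example (assuming a typical 133 MHz t_clk, i.e.
 * t_clk = 133000000): a 100 Mbit/s rate gives
 * token_rate = ((100000000 / 1000) * 64) / (133000000 / 1000) = 48,
 * comfortably below the 1023 cap applied below.
 */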
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
        int token_rate;
        int mtu;
        int bucket_size;

        token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
        if (token_rate > 1023)
                token_rate = 1023;

        mtu = (mp->dev->mtu + 255) >> 8;
        if (mtu > 63)
                mtu = 63;

        bucket_size = (burst + 255) >> 8;
        if (bucket_size > 65535)
                bucket_size = 65535;

        switch (mp->shared->tx_bw_control) {
        case TX_BW_CONTROL_OLD_LAYOUT:
                wrlp(mp, TX_BW_RATE, token_rate);
                wrlp(mp, TX_BW_MTU, mtu);
                wrlp(mp, TX_BW_BURST, bucket_size);
                break;
        case TX_BW_CONTROL_NEW_LAYOUT:
                wrlp(mp, TX_BW_RATE_MOVED, token_rate);
                wrlp(mp, TX_BW_MTU_MOVED, mtu);
                wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
                break;
        }
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int token_rate;
        int bucket_size;

        token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
        if (token_rate > 1023)
                token_rate = 1023;

        bucket_size = (burst + 255) >> 8;
        if (bucket_size > 65535)
                bucket_size = 65535;

        wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
        wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int off;
        u32 val;

        /*
         * Turn on fixed priority mode.
         */
        off = 0;
        switch (mp->shared->tx_bw_control) {
        case TX_BW_CONTROL_OLD_LAYOUT:
                off = TXQ_FIX_PRIO_CONF;
                break;
        case TX_BW_CONTROL_NEW_LAYOUT:
                off = TXQ_FIX_PRIO_CONF_MOVED;
                break;
        }

        if (off) {
                val = rdlp(mp, off);
                val |= 1 << txq->index;
                wrlp(mp, off, val);
        }
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
        int off;
        u32 val;

        /*
         * Turn off fixed priority mode.
         */
        off = 0;
        switch (mp->shared->tx_bw_control) {
        case TX_BW_CONTROL_OLD_LAYOUT:
                off = TXQ_FIX_PRIO_CONF;
                break;
        case TX_BW_CONTROL_NEW_LAYOUT:
                off = TXQ_FIX_PRIO_CONF_MOVED;
                break;
        }

        if (off) {
                val = rdlp(mp, off);
                val &= ~(1 << txq->index);
                wrlp(mp, off, val);

                /*
                 * Configure WRR weight for this queue.
                 */
                val = rdlp(mp, off);
                val = (val & ~0xff) | (weight & 0xff);
                wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
        }
}


/* mii management interface *************************************************/
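/*
 * SMI_REG frame layout, as used by smi_bus_read()/smi_bus_write()
 * below: bit 28 is the busy flag, bit 27 flags valid read data,
 * bit 26 selects a read (it is clear for writes), the PHY register
 * number sits at bits 25:21, the PHY address at bits 20:16, and
 * the 16-bit data occupies the low half.
 */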
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
        struct mv643xx_eth_shared_private *msp = dev_id;

        if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
                writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
                wake_up(&msp->smi_busy_wait);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
        return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
        if (msp->err_interrupt == NO_IRQ) {
                int i;

                for (i = 0; !smi_is_done(msp); i++) {
                        if (i == 10)
                                return -ETIMEDOUT;
                        msleep(10);
                }

                return 0;
        }

        if (!smi_is_done(msp)) {
                wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
                                   msecs_to_jiffies(100));
                if (!smi_is_done(msp))
                        return -ETIMEDOUT;
        }

        return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
        struct mv643xx_eth_shared_private *msp = bus->priv;
        void __iomem *smi_reg = msp->base + SMI_REG;
        int ret;

        if (smi_wait_ready(msp)) {
                printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

        if (smi_wait_ready(msp)) {
                printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        ret = readl(smi_reg);
        if (!(ret & SMI_READ_VALID)) {
                printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
                return -ENODEV;
        }

        return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
        struct mv643xx_eth_shared_private *msp = bus->priv;
        void __iomem *smi_reg = msp->base + SMI_REG;

        if (smi_wait_ready(msp)) {
                printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        writel(SMI_OPCODE_WRITE | (reg << 21) |
                (addr << 16) | (val & 0xffff), smi_reg);

        if (smi_wait_ready(msp)) {
                printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        unsigned long tx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_dropped = 0;
        int i;

        for (i = 0; i < mp->txq_count; i++) {
                struct tx_queue *txq = mp->txq + i;

                tx_packets += txq->tx_packets;
                tx_bytes += txq->tx_bytes;
                tx_dropped += txq->tx_dropped;
        }

        stats->tx_packets = tx_packets;
        stats->tx_bytes = tx_bytes;
        stats->tx_dropped = tx_dropped;

        return stats;
}

static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
{
        u32 lro_aggregated = 0;
        u32 lro_flushed = 0;
        u32 lro_no_desc = 0;
        int i;

        for (i = 0; i < mp->rxq_count; i++) {
                struct rx_queue *rxq = mp->rxq + i;

                lro_aggregated += rxq->lro_mgr.stats.aggregated;
                lro_flushed += rxq->lro_mgr.stats.flushed;
                lro_no_desc += rxq->lro_mgr.stats.no_desc;
        }

        mp->lro_counters.lro_aggregated = lro_aggregated;
        mp->lro_counters.lro_flushed = lro_flushed;
        mp->lro_counters.lro_no_desc = lro_no_desc;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
        return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
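/*
 * The hardware MIB counters are clear-on-read, which is what lets
 * mib_counters_clear() below wipe them by simply reading every
 * counter in the 0x00-0x7c range and discarding the values.
 */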
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
        int i;

        for (i = 0; i < 0x80; i += 4)
                mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
        struct mib_counters *p = &mp->mib_counters;

        spin_lock_bh(&mp->mib_counters_lock);
        p->good_octets_received += mib_read(mp, 0x00);
        p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
        p->bad_octets_received += mib_read(mp, 0x08);
        p->internal_mac_transmit_err += mib_read(mp, 0x0c);
        p->good_frames_received += mib_read(mp, 0x10);
        p->bad_frames_received += mib_read(mp, 0x14);
        p->broadcast_frames_received += mib_read(mp, 0x18);
        p->multicast_frames_received += mib_read(mp, 0x1c);
        p->frames_64_octets += mib_read(mp, 0x20);
        p->frames_65_to_127_octets += mib_read(mp, 0x24);
        p->frames_128_to_255_octets += mib_read(mp, 0x28);
        p->frames_256_to_511_octets += mib_read(mp, 0x2c);
        p->frames_512_to_1023_octets += mib_read(mp, 0x30);
        p->frames_1024_to_max_octets += mib_read(mp, 0x34);
        p->good_octets_sent += mib_read(mp, 0x38);
        p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
        p->good_frames_sent += mib_read(mp, 0x40);
        p->excessive_collision += mib_read(mp, 0x44);
        p->multicast_frames_sent += mib_read(mp, 0x48);
        p->broadcast_frames_sent += mib_read(mp, 0x4c);
        p->unrec_mac_control_received += mib_read(mp, 0x50);
        p->fc_sent += mib_read(mp, 0x54);
        p->good_fc_received += mib_read(mp, 0x58);
        p->bad_fc_received += mib_read(mp, 0x5c);
        p->undersize_received += mib_read(mp, 0x60);
        p->fragments_received += mib_read(mp, 0x64);
        p->oversize_received += mib_read(mp, 0x68);
        p->jabber_received += mib_read(mp, 0x6c);
        p->mac_receive_error += mib_read(mp, 0x70);
        p->bad_crc_event += mib_read(mp, 0x74);
        p->collision += mib_read(mp, 0x78);
        p->late_collision += mib_read(mp, 0x7c);
        spin_unlock_bh(&mp->mib_counters_lock);

        mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
        struct mv643xx_eth_private *mp = (void *)_mp;

        mib_counters_update(mp);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *      coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *      register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
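/*
 * For example, with the same assumed 133 MHz t_clk, asking for
 * 250 usec of RX coalescing computes
 * (250 * 133000000 + 31999999) / 64000000 = 520, and reading it
 * back yields 520 * 64000000 / 133000000 = 250 usec (rounded down).
 */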
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
        u32 val = rdlp(mp, SDMA_CONFIG);
        u64 temp;

        if (mp->shared->extended_rx_coal_limit)
                temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
        else
                temp = (val & 0x003fff00) >> 8;

        temp *= 64000000;
        do_div(temp, mp->shared->t_clk);

        return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
        u64 temp;
        u32 val;

        temp = (u64)usec * mp->shared->t_clk;
        temp += 31999999;
        do_div(temp, 64000000);

        val = rdlp(mp, SDMA_CONFIG);
        if (mp->shared->extended_rx_coal_limit) {
                if (temp > 0xffff)
                        temp = 0xffff;
                val &= ~0x023fff80;
                val |= (temp & 0x8000) << 10;
                val |= (temp & 0x7fff) << 7;
        } else {
                if (temp > 0x3fff)
                        temp = 0x3fff;
                val &= ~0x003fff00;
                val |= (temp & 0x3fff) << 8;
        }
        wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
        u64 temp;

        temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
        temp *= 64000000;
        do_div(temp, mp->shared->t_clk);

        return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
        u64 temp;

        temp = (u64)usec * mp->shared->t_clk;
        temp += 31999999;
        do_div(temp, 64000000);

        if (temp > 0x3fff)
                temp = 0x3fff;
        wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int netdev_off;
        int mp_off;
};

#define SSTAT(m)                                                \
        { #m, FIELD_SIZEOF(struct net_device_stats, m),         \
          offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)                                              \
        { #m, FIELD_SIZEOF(struct mib_counters, m),             \
          -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m)                                              \
        { #m, FIELD_SIZEOF(struct lro_counters, m),             \
          -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
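/*
 * Each stat records at most one valid offset; the unused one is set
 * to -1 so that mv643xx_eth_get_ethtool_stats() can tell whether a
 * value lives in struct net_device or in the driver-private struct.
 */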
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
        SSTAT(rx_packets),
        SSTAT(tx_packets),
        SSTAT(rx_bytes),
        SSTAT(tx_bytes),
        SSTAT(rx_errors),
        SSTAT(tx_errors),
        SSTAT(rx_dropped),
        SSTAT(tx_dropped),
        MIBSTAT(good_octets_received),
        MIBSTAT(bad_octets_received),
        MIBSTAT(internal_mac_transmit_err),
        MIBSTAT(good_frames_received),
        MIBSTAT(bad_frames_received),
        MIBSTAT(broadcast_frames_received),
        MIBSTAT(multicast_frames_received),
        MIBSTAT(frames_64_octets),
        MIBSTAT(frames_65_to_127_octets),
        MIBSTAT(frames_128_to_255_octets),
        MIBSTAT(frames_256_to_511_octets),
        MIBSTAT(frames_512_to_1023_octets),
        MIBSTAT(frames_1024_to_max_octets),
        MIBSTAT(good_octets_sent),
        MIBSTAT(good_frames_sent),
        MIBSTAT(excessive_collision),
        MIBSTAT(multicast_frames_sent),
        MIBSTAT(broadcast_frames_sent),
        MIBSTAT(unrec_mac_control_received),
        MIBSTAT(fc_sent),
        MIBSTAT(good_fc_received),
        MIBSTAT(bad_fc_received),
        MIBSTAT(undersize_received),
        MIBSTAT(fragments_received),
        MIBSTAT(oversize_received),
        MIBSTAT(jabber_received),
        MIBSTAT(mac_receive_error),
        MIBSTAT(bad_crc_event),
        MIBSTAT(collision),
        MIBSTAT(late_collision),
        LROSTAT(lro_aggregated),
        LROSTAT(lro_flushed),
        LROSTAT(lro_no_desc),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
                             struct ethtool_cmd *cmd)
{
        int err;

        err = phy_read_status(mp->phy);
        if (err == 0)
                err = phy_ethtool_gset(mp->phy, cmd);

        /*
         * The MAC does not support 1000baseT_Half.
         */
        cmd->supported &= ~SUPPORTED_1000baseT_Half;
        cmd->advertising &= ~ADVERTISED_1000baseT_Half;

        return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
                                 struct ethtool_cmd *cmd)
{
        u32 port_status;

        port_status = rdlp(mp, PORT_STATUS);

        cmd->supported = SUPPORTED_MII;
        cmd->advertising = ADVERTISED_MII;
        switch (port_status & PORT_SPEED_MASK) {
        case PORT_SPEED_10:
                cmd->speed = SPEED_10;
                break;
        case PORT_SPEED_100:
                cmd->speed = SPEED_100;
                break;
        case PORT_SPEED_1000:
                cmd->speed = SPEED_1000;
                break;
        default:
                cmd->speed = -1;
                break;
        }
        cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
        cmd->port = PORT_MII;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_DISABLE;
        cmd->maxtxpkt = 1;
        cmd->maxrxpkt = 1;

        return 0;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy != NULL)
                return mv643xx_eth_get_settings_phy(mp, cmd);
        else
                return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy == NULL)
                return -EINVAL;

        /*
         * The MAC does not support 1000baseT_Half.
         */
        cmd->advertising &= ~ADVERTISED_1000baseT_Half;

        return phy_ethtool_sset(mp->phy, cmd);
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
{
        strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
        strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
        strncpy(drvinfo->fw_version, "N/A", 32);
        strncpy(drvinfo->bus_info, "platform", 32);
        drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy == NULL)
                return -EINVAL;

        return genphy_restart_aneg(mp->phy);
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
        return !!netif_carrier_ok(dev);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        ec->rx_coalesce_usecs = get_rx_coal(mp);
        ec->tx_coalesce_usecs = get_tx_coal(mp);

        return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        set_rx_coal(mp, ec->rx_coalesce_usecs);
        set_tx_coal(mp, ec->tx_coalesce_usecs);

        return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        er->rx_max_pending = 4096;
        er->tx_max_pending = 4096;
        er->rx_mini_max_pending = 0;
        er->rx_jumbo_max_pending = 0;

        er->rx_pending = mp->rx_ring_size;
        er->tx_pending = mp->tx_ring_size;

        er->rx_mini_pending = 0;
        er->rx_jumbo_pending = 0;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (er->rx_mini_pending || er->rx_jumbo_pending)
                return -EINVAL;

        mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
        mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

        if (netif_running(dev)) {
                mv643xx_eth_stop(dev);
                if (mv643xx_eth_open(dev)) {
                        dev_printk(KERN_ERR, &dev->dev,
                                   "fatal error on re-opening device after "
                                   "ring param change\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

static u32
mv643xx_eth_get_rx_csum(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
}

static int
mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

        return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
                                    uint32_t stringset, uint8_t *data)
{
        int i;

        if (stringset == ETH_SS_STATS) {
                for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                                mv643xx_eth_stats[i].stat_string,
                                ETH_GSTRING_LEN);
                }
        }
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
                                          struct ethtool_stats *stats,
                                          uint64_t *data)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int i;

        mv643xx_eth_get_stats(dev);
        mib_counters_update(mp);
        mv643xx_eth_grab_lro_stats(mp);

        for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
                const struct mv643xx_eth_stats *stat;
                void *p;

                stat = mv643xx_eth_stats + i;

                if (stat->netdev_off >= 0)
                        p = ((void *)mp->dev) + stat->netdev_off;
                else
                        p = ((void *)mp) + stat->mp_off;

                data[i] = (stat->sizeof_stat == 8) ?
                                *(uint64_t *)p : *(uint32_t *)p;
        }
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
        if (sset == ETH_SS_STATS)
                return ARRAY_SIZE(mv643xx_eth_stats);

        return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
        .get_settings           = mv643xx_eth_get_settings,
  1387. .set_settings = mv643xx_eth_set_settings,
  1388. .get_drvinfo = mv643xx_eth_get_drvinfo,
  1389. .nway_reset = mv643xx_eth_nway_reset,
  1390. .get_link = mv643xx_eth_get_link,
  1391. .get_coalesce = mv643xx_eth_get_coalesce,
  1392. .set_coalesce = mv643xx_eth_set_coalesce,
  1393. .get_ringparam = mv643xx_eth_get_ringparam,
  1394. .set_ringparam = mv643xx_eth_set_ringparam,
  1395. .get_rx_csum = mv643xx_eth_get_rx_csum,
  1396. .set_rx_csum = mv643xx_eth_set_rx_csum,
  1397. .set_tx_csum = ethtool_op_set_tx_csum,
  1398. .set_sg = ethtool_op_set_sg,
  1399. .get_strings = mv643xx_eth_get_strings,
  1400. .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
  1401. .get_flags = ethtool_op_get_flags,
  1402. .set_flags = ethtool_op_set_flags,
  1403. .get_sset_count = mv643xx_eth_get_sset_count,
  1404. };
  1405. /* address handling *********************************************************/
  1406. static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
  1407. {
  1408. unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
  1409. unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
  1410. addr[0] = (mac_h >> 24) & 0xff;
  1411. addr[1] = (mac_h >> 16) & 0xff;
  1412. addr[2] = (mac_h >> 8) & 0xff;
  1413. addr[3] = mac_h & 0xff;
  1414. addr[4] = (mac_l >> 8) & 0xff;
  1415. addr[5] = mac_l & 0xff;
  1416. }
  1417. static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
  1418. {
  1419. wrlp(mp, MAC_ADDR_HIGH,
  1420. (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
  1421. wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
  1422. }
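
/*
 * The port's unicast filter matches frames whose destination address
 * equals dev_addr in its first five bytes and in the high nibble of
 * the sixth byte; only the low nibble of the last byte is looked up in
 * a 16-entry table.  uc_addr_filter_mask() below computes that table
 * as a bitmask where bit n means "accept a last-byte low nibble of n".
 * Worked example (illustrative values, not from the source): with
 * dev_addr ending in 0x15 and one extra unicast address ending in
 * 0x17, the result is (1 << 5) | (1 << 7) = 0xa0.  An address that
 * differs anywhere before the last nibble forces a return of 0, which
 * makes the caller fall back to unicast promiscuous mode.
 */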
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct dev_addr_list *uc_ptr;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
		if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG);
	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		wrlp(mp, PORT_CONFIG, port_config);
		return;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrlp(mp, PORT_CONFIG, port_config);
}
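
/*
 * The 16 filter entries live in four 32-bit registers, one entry per
 * byte lane: nibble n maps to bit (8 * (n % 4)) of register n / 4.
 * Continuing the illustration above, a 0xa0 mask sets bit 8 (nibble 5)
 * and bit 24 (nibble 7) of the second register and leaves the other
 * three registers zero.
 */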
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
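
/*
 * addr_crc() is a bitwise CRC-8 over the six address bytes with
 * polynomial 0x107 (x^8 + x^2 + x + 1): each byte is XORed into the
 * accumulator and the inner loop divides out the polynomial bit by
 * bit, so the result always fits in 8 bits.  That value indexes one of
 * the 256 hash bins of the "other" multicast table programmed below,
 * where bin n is bit (8 * (n % 4)) of 32-bit word n / 4 -- the same
 * byte-lane layout as the unicast table.
 */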
static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct dev_addr_list *addr;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;
		int i;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}


/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	rxq->lro_mgr.dev = mp->dev;
	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
	rxq->lro_mgr.features = LRO_F_NAPI;
	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
	rxq->lro_mgr.max_aggr = 32;
	rxq->lro_mgr.frag_align_pad = 0;
	rxq->lro_mgr.lro_arr = rxq->lro_arr;
	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;

	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));

	return 0;

out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}


/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT)
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);

	int_cause &= INT_TX_END | INT_RX;
	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}
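
/*
 * The shifts above fold the hardware cause bits into per-queue work
 * bitmasks: the per-queue RX cause bits start at bit 2 of INT_CAUSE
 * (hence ">> 2") and the per-queue TX-end bits at bit 19 (hence
 * ">> 19").  A TX-end event is only recorded for queues that the
 * TXQ_COMMAND readback shows as no longer enabled, i.e. queues whose
 * DMA actually ran dry rather than ones still being kicked.
 */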
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	mp->work_rx_refill |= mp->work_rx_oom;
	mp->work_rx_oom = 0;

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end |
				mp->work_rx | mp->work_rx_refill;
		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (mp->work_rx_refill & queue_mask) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->work_rx_oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
	}

	return work_done;
}
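
/*
 * Scheduling policy of the poll loop above: pending link events are
 * handled first; otherwise fls() selects the highest-numbered work
 * source with a pending bit and services it in chunks of at most 16
 * units of work, re-reading the hardware cause registers whenever the
 * local work bits run dry.  If the NAPI budget is not exhausted,
 * interrupts are unmasked again; a pending RX out-of-memory condition
 * is instead retried from the rx_oom timer after HZ / 10 (about
 * 100 ms).
 */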
static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

static void phy_reset(struct mv643xx_eth_private *mp)
{
	int data;

	data = phy_read(mp->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(mp->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(mp->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}
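
/*
 * BMCR_RESET is self-clearing: the PHY drops the bit once its internal
 * reset completes, so the loop above simply polls MII_BMCR until the
 * bit disappears (or a read fails).  Note there is no explicit timeout
 * here; a PHY that never clears the bit would keep this loop spinning.
 */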
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	wrlp(mp, PORT_CONFIG, 0x02000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;
}
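
/*
 * Worked example of the sizing above: for the default MTU of 1500,
 * skb_size = 1500 + 36 = 1536, which is already a multiple of 8; for a
 * jumbo MTU of 9000, skb_size = 9036, and (9036 + 7) & ~7 rounds it up
 * to 9040.
 */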
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	skb_queue_head_init(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
	}

	if (mp->work_rx_oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
	}

	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);

	return 0;

out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);

	return -EOPNOTSUPP;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  There is a possible danger that the
	 * open will not succeed, due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
}
#endif


/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
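
/*
 * Each populated DRAM chip select gets its own address decode window.
 * The enable mask starts at 0x3f with one bit per window and,
 * apparently, set bits meaning "disabled", since a bit is cleared for
 * every window that gets programmed; win_protect accumulates two bits
 * per window (0b11, presumably full read/write access) and is later
 * written into each port's WINDOW_PROTECT register by the per-port
 * probe.
 */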
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}
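
/*
 * Both checks above rely on the same probing idiom: write a value into
 * the candidate bit positions and read it back.  Bits that are
 * implemented in a given silicon revision retain the written value;
 * unimplemented bits read back as zero, which tells the driver which
 * register layout (or feature) this particular chip has.
 */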
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	/*
	 * Set up and register SMI bus.
	 */
	if (pd == NULL || pd->shared_smi == NULL) {
		msp->smi_bus = mdiobus_alloc();
		if (msp->smi_bus == NULL)
			goto out_unmap;

		msp->smi_bus->priv = msp;
		msp->smi_bus->name = "mv643xx_eth smi";
		msp->smi_bus->read = smi_bus_read;
		msp->smi_bus->write = smi_bus_write;
		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
		msp->smi_bus->parent = &pdev->dev;
		msp->smi_bus->phy_mask = 0xffffffff;
		if (mdiobus_register(msp->smi_bus) < 0)
			goto out_free_mii_bus;
		msp->smi = msp;
	} else {
		msp->smi = platform_get_drvdata(pd->shared_smi);
	}

	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);

	return 0;

out_free_mii_bus:
	mdiobus_free(msp->smi_bus);
out_unmap:
	iounmap(msp->base);
out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

	if (pd == NULL || pd->shared_smi == NULL) {
		mdiobus_unregister(msp->smi_bus);
		mdiobus_free(msp->smi_bus);
	}
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}
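
/*
 * The "pd->rx_queue_count ? : 1" form above is GCC's two-operand
 * conditional expression: it evaluates to pd->rx_queue_count when that
 * is non-zero and to 1 otherwise, so platform data that leaves the
 * queue counts unset gets a single RX and a single TX queue by
 * default.
 */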
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct mii_bus *bus = mp->shared->smi->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				phy_addr_set(mp, addr);
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};

static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	set_params(mp, pd);
	dev->real_num_tx_queues = mp->txq_count;

	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
		mp->phy = phy_scan(mp, pd->phy_addr);

	if (mp->phy != NULL)
		phy_init(mp, pd->speed, pd->duplex);

	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

	init_pscr(mp, pd->speed, pd->duplex);

	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
		   mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);