/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
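/*
 * In other words, the per-port block for port N starts at
 * 0x0400 + (N << 10); e.g. 0x0400 + (2 << 10) = 0x0c00 for port #2.
 * The offsets below are relative to that per-port base.
 */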
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define PORT_SERIAL_CONTROL		0x003c
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_RX				0x000003fc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_16_64BIT		(4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_16_64BIT	|	\
		 TX_BURST_SIZE_16_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_16_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_16_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
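/*
 * The burst size (16 64-bit words) is the same either way; the only
 * endian-dependent part is buffer-data byte swapping, which is left
 * at its default on big-endian hosts and disabled via the
 * BLM_*_NO_SWAP bits on little-endian ones.
 */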
/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		128
#define DEFAULT_TX_QUEUE_SIZE		256


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
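/*
 * The two variants above describe the same hardware descriptor:
 * between the big- and little-endian layouts the 16-bit members swap
 * within their 32-bit word and the 32-bit words swap within each
 * 64-bit group, which (together with the SDMA swap configuration
 * above) makes the in-memory byte layout seen by the DMA engine the
 * same on either kind of host, so no swabbing is needed at runtime.
 */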
  200. /* RX & TX descriptor command */
  201. #define BUFFER_OWNED_BY_DMA 0x80000000
  202. /* RX & TX descriptor status */
  203. #define ERROR_SUMMARY 0x00000001
  204. /* RX descriptor status */
  205. #define LAYER_4_CHECKSUM_OK 0x40000000
  206. #define RX_ENABLE_INTERRUPT 0x20000000
  207. #define RX_FIRST_DESC 0x08000000
  208. #define RX_LAST_DESC 0x04000000
  209. /* TX descriptor command */
  210. #define TX_ENABLE_INTERRUPT 0x00800000
  211. #define GEN_CRC 0x00400000
  212. #define TX_FIRST_DESC 0x00200000
  213. #define TX_LAST_DESC 0x00100000
  214. #define ZERO_PADDING 0x00080000
  215. #define GEN_IP_V4_CHECKSUM 0x00040000
  216. #define GEN_TCP_UDP_CHECKSUM 0x00020000
  217. #define UDP_FRAME 0x00010000
  218. #define MAC_HDR_EXTRA_4_BYTES 0x00008000
  219. #define MAC_HDR_EXTRA_8_BYTES 0x00000200
  220. #define TX_IHL_SHIFT 11
  221. /* global *******************************************************************/
  222. struct mv643xx_eth_shared_private {
  223. /*
  224. * Ethernet controller base address.
  225. */
  226. void __iomem *base;
  227. /*
  228. * Points at the right SMI instance to use.
  229. */
  230. struct mv643xx_eth_shared_private *smi;
  231. /*
  232. * Provides access to local SMI interface.
  233. */
  234. struct mii_bus *smi_bus;
  235. /*
  236. * If we have access to the error interrupt pin (which is
  237. * somewhat misnamed as it not only reflects internal errors
  238. * but also reflects SMI completion), use that to wait for
  239. * SMI access completion instead of polling the SMI busy bit.
  240. */
  241. int err_interrupt;
  242. wait_queue_head_t smi_busy_wait;
  243. /*
  244. * Per-port MBUS window access register value.
  245. */
  246. u32 win_protect;
  247. /*
  248. * Hardware-specific parameters.
  249. */
  250. unsigned int t_clk;
  251. int extended_rx_coal_limit;
  252. int tx_bw_control;
  253. };
  254. #define TX_BW_CONTROL_ABSENT 0
  255. #define TX_BW_CONTROL_OLD_LAYOUT 1
  256. #define TX_BW_CONTROL_NEW_LAYOUT 2
  257. /* per-port *****************************************************************/
  258. struct mib_counters {
  259. u64 good_octets_received;
  260. u32 bad_octets_received;
  261. u32 internal_mac_transmit_err;
  262. u32 good_frames_received;
  263. u32 bad_frames_received;
  264. u32 broadcast_frames_received;
  265. u32 multicast_frames_received;
  266. u32 frames_64_octets;
  267. u32 frames_65_to_127_octets;
  268. u32 frames_128_to_255_octets;
  269. u32 frames_256_to_511_octets;
  270. u32 frames_512_to_1023_octets;
  271. u32 frames_1024_to_max_octets;
  272. u64 good_octets_sent;
  273. u32 good_frames_sent;
  274. u32 excessive_collision;
  275. u32 multicast_frames_sent;
  276. u32 broadcast_frames_sent;
  277. u32 unrec_mac_control_received;
  278. u32 fc_sent;
  279. u32 good_fc_received;
  280. u32 bad_fc_received;
  281. u32 undersize_received;
  282. u32 fragments_received;
  283. u32 oversize_received;
  284. u32 jabber_received;
  285. u32 mac_receive_error;
  286. u32 bad_crc_event;
  287. u32 collision;
  288. u32 late_collision;
  289. };
  290. struct rx_queue {
  291. int index;
  292. int rx_ring_size;
  293. int rx_desc_count;
  294. int rx_curr_desc;
  295. int rx_used_desc;
  296. struct rx_desc *rx_desc_area;
  297. dma_addr_t rx_desc_dma;
  298. int rx_desc_area_size;
  299. struct sk_buff **rx_skb;
  300. };
  301. struct tx_queue {
  302. int index;
  303. int tx_ring_size;
  304. int tx_desc_count;
  305. int tx_curr_desc;
  306. int tx_used_desc;
  307. struct tx_desc *tx_desc_area;
  308. dma_addr_t tx_desc_dma;
  309. int tx_desc_area_size;
  310. struct sk_buff_head tx_skb;
  311. unsigned long tx_packets;
  312. unsigned long tx_bytes;
  313. unsigned long tx_dropped;
  314. };
  315. struct mv643xx_eth_private {
  316. struct mv643xx_eth_shared_private *shared;
  317. void __iomem *base;
  318. int port_num;
  319. struct net_device *dev;
  320. struct phy_device *phy;
  321. struct timer_list mib_counters_timer;
  322. spinlock_t mib_counters_lock;
  323. struct mib_counters mib_counters;
  324. struct work_struct tx_timeout_task;
  325. struct napi_struct napi;
  326. u8 work_link;
  327. u8 work_tx;
  328. u8 work_tx_end;
  329. u8 work_rx;
  330. u8 work_rx_refill;
  331. u8 work_rx_oom;
  332. int skb_size;
  333. struct sk_buff_head rx_recycle;
  334. /*
  335. * RX state.
  336. */
  337. int default_rx_ring_size;
  338. unsigned long rx_desc_sram_addr;
  339. int rx_desc_sram_size;
  340. int rxq_count;
  341. struct timer_list rx_oom;
  342. struct rx_queue rxq[8];
  343. /*
  344. * TX state.
  345. */
  346. int default_tx_ring_size;
  347. unsigned long tx_desc_sram_addr;
  348. int tx_desc_sram_size;
  349. int txq_count;
  350. struct tx_queue txq[8];
  351. };
  352. /* port register accessors **************************************************/
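/*
 * rdl()/wrl() access the shared (per-controller) register block,
 * e.g. rdl(mp, WINDOW_BAR_ENABLE); rdlp()/wrlp() access the per-port
 * block that mp->base already points at, e.g. rdlp(mp, PORT_STATUS).
 */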
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}
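/*
 * RXQ_COMMAND and TXQ_COMMAND share the same layout: writing 1 << q
 * to the low byte enables queue q, writing 1 << (q + 8) requests that
 * it be disabled, and the low byte reads back the set of queues still
 * enabled -- which is why the disable helpers below poll it.
 */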
static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}


/* rx napi ******************************************************************/
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);
		netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size +
					    dma_get_cache_alignment() - 1);

		if (skb == NULL) {
			mp->work_rx_oom |= 1 << rxq->index;
			goto oom;
		}

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						mp->skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = mp->skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
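/*
 * The SDMA engine apparently cannot reliably handle TX fragments that
 * are both at most 8 bytes long and not aligned to an 8-byte boundary;
 * the helper below detects such fragments so that mv643xx_eth_xmit()
 * can linearize the skb before handing it to the hardware.
 */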
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size,
					     DMA_TO_DEVICE);
	}
}
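/*
 * A __sum16 checksum is already in network byte order; the helper
 * below merely re-annotates it as __be16 so that ntohs() can produce
 * a host-order value for the descriptor's l4i_chk field without
 * sparse complaining about the type conversion.
 */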
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
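		/*
		 * tag_bytes is the amount of VLAN tagging in front of
		 * the IP header.  The hardware can only compensate for
		 * 0, 4, 8 or 12 extra header bytes (hence the "& ~12"
		 * test); anything else falls back to a software
		 * checksum.
		 */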
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(NULL, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(NULL, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->default_rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
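/*
 * token_rate is expressed in units of 64 bits per t_clk tick and is
 * clamped to 10 bits.  As a worked example (assuming a hypothetical
 * t_clk of 133 MHz), a 100 Mbit/s rate gives
 * token_rate = (100000 * 64) / 133000 = 48.
 */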
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val &= ~(1 << txq->index);
		wrlp(mp, off, val);

		/*
		 * Configure WRR weight for this queue.
		 */
		val = rdlp(mp, TXQ_BW_WRR_CONF(txq->index));
		val = (val & ~0xff) | (weight & 0xff);
		wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
	}
}
/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}
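/*
 * An SMI transaction is a single 32-bit write to SMI_REG: a read sets
 * SMI_OPCODE_READ (a write uses SMI_OPCODE_WRITE, which is zero), the
 * register number goes in bits 21-25, the PHY address in bits 16-20,
 * and, for writes, the data in the low 16 bits.  On reads,
 * SMI_READ_VALID in the returned register indicates that the low 16
 * bits hold the register value.
 */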
static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
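/*
 * The hardware MIB counters appear to be clear-on-read:
 * mib_counters_clear() simply reads every counter in the 0x80-byte
 * block to zero it, and mib_counters_update() accumulates each value
 * read into the software copy under mib_counters_lock.
 */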
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
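/*
 * Each entry lives either in struct net_device_stats (SSTAT, with
 * mp_off == -1) or in struct mib_counters (MIBSTAT, with
 * netdev_off == -1); mv643xx_eth_get_ethtool_stats() picks whichever
 * offset is non-negative.
 */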
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static int
mv643xx_eth_set_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return genphy_restart_aneg(mp->phy);
}

static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}
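/*
 * The unicast filter matches on the low nibble of the last address
 * byte: the helper below returns a 16-bit mask with one bit per
 * accepted nibble value (all secondary addresses must agree with
 * dev_addr everywhere except that nibble), or 0 to indicate that
 * unicast promiscuous mode is needed instead.
 */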
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct dev_addr_list *uc_ptr;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
		if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG);
	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		wrlp(mp, PORT_CONFIG, port_config);
		return;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrlp(mp, PORT_CONFIG, port_config);
}
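/*
 * addr_crc() computes the 8-bit CRC (polynomial 0x107, i.e.
 * x^8 + x^2 + x + 1) of a 6-byte MAC address; the result indexes the
 * 256-entry "other" multicast hash table.  Multicast addresses of the
 * form 01:00:5e:00:00:xx instead go into the "special" table, indexed
 * directly by the last address byte.
 */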
  1271. static int addr_crc(unsigned char *addr)
  1272. {
  1273. int crc = 0;
  1274. int i;
  1275. for (i = 0; i < 6; i++) {
  1276. int j;
  1277. crc = (crc ^ addr[i]) << 8;
  1278. for (j = 7; j >= 0; j--) {
  1279. if (crc & (0x100 << j))
  1280. crc ^= 0x107 << j;
  1281. }
  1282. }
  1283. return crc;
  1284. }

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 *mc_spec;
        u32 *mc_other;
        struct dev_addr_list *addr;
        int i;

        if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                int port_num;
                u32 accept;
                int i;

                /*
                 * Accept all multicasts.  Also reached via 'goto oom'
                 * below when the filter tables cannot be allocated.
                 */
oom:
                port_num = mp->port_num;
                accept = 0x01010101;
                for (i = 0; i < 0x100; i += 4) {
                        wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
                        wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
                }
                return;
        }

        mc_spec = kmalloc(0x200, GFP_KERNEL);
        if (mc_spec == NULL)
                goto oom;
        mc_other = mc_spec + (0x100 >> 2);

        memset(mc_spec, 0, 0x100);
        memset(mc_other, 0, 0x100);

        for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
                u8 *a = addr->da_addr;
                u32 *table;
                int entry;

                if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
                        table = mc_spec;
                        entry = a[5];
                } else {
                        table = mc_other;
                        entry = addr_crc(a);
                }

                /* each table entry is one byte wide; the accept bit is bit 0 */
                table[entry >> 2] |= 1 << (8 * (entry & 3));
        }

        for (i = 0; i < 0x100; i += 4) {
                wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
                wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
        }

        kfree(mc_spec);
}
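
/*
 * Illustrative summary: both filter tables hold 256 one-byte entries
 * packed four to a 32-bit register.  Addresses of the IPv4 multicast
 * form 01:00:5e:00:00:xx go into the special table, indexed directly
 * by the last byte (224.0.0.1 maps to entry 1, for instance); all
 * other multicast addresses are hashed into the other table via
 * addr_crc().
 */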

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
        mv643xx_eth_program_unicast_filter(dev);
        mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

        netif_addr_lock_bh(dev);
        mv643xx_eth_program_unicast_filter(dev);
        netif_addr_unlock_bh(dev);

        return 0;
}

/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
        struct rx_queue *rxq = mp->rxq + index;
        struct rx_desc *rx_desc;
        int size;
        int i;

        rxq->index = index;

        rxq->rx_ring_size = mp->default_rx_ring_size;

        rxq->rx_desc_count = 0;
        rxq->rx_curr_desc = 0;
        rxq->rx_used_desc = 0;

        size = rxq->rx_ring_size * sizeof(struct rx_desc);

        if (index == 0 && size <= mp->rx_desc_sram_size) {
                rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
                                            mp->rx_desc_sram_size);
                rxq->rx_desc_dma = mp->rx_desc_sram_addr;
        } else {
                rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
                                                       &rxq->rx_desc_dma,
                                                       GFP_KERNEL);
        }

        if (rxq->rx_desc_area == NULL) {
                dev_printk(KERN_ERR, &mp->dev->dev,
                           "can't allocate rx ring (%d bytes)\n", size);
                goto out;
        }
        memset(rxq->rx_desc_area, 0, size);

        rxq->rx_desc_area_size = size;
        rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
                              GFP_KERNEL);
        if (rxq->rx_skb == NULL) {
                dev_printk(KERN_ERR, &mp->dev->dev,
                           "can't allocate rx skb ring\n");
                goto out_free;
        }

        rx_desc = (struct rx_desc *)rxq->rx_desc_area;
        for (i = 0; i < rxq->rx_ring_size; i++) {
                int nexti;

                nexti = i + 1;
                if (nexti == rxq->rx_ring_size)
                        nexti = 0;

                rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
                                           nexti * sizeof(struct rx_desc);
        }

        return 0;

out_free:
        if (index == 0 && size <= mp->rx_desc_sram_size)
                iounmap(rxq->rx_desc_area);
        else
                dma_free_coherent(NULL, size,
                                  rxq->rx_desc_area,
                                  rxq->rx_desc_dma);

out:
        return -ENOMEM;
}
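
/*
 * Note: the loop above links the descriptors into a ring; each
 * next_desc_ptr holds the bus address of its successor and the last
 * descriptor wraps back to the first, so the SDMA engine can chase
 * the chain without further CPU involvement.
 */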

static void rxq_deinit(struct rx_queue *rxq)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        int i;

        rxq_disable(rxq);

        for (i = 0; i < rxq->rx_ring_size; i++) {
                if (rxq->rx_skb[i]) {
                        dev_kfree_skb(rxq->rx_skb[i]);
                        rxq->rx_desc_count--;
                }
        }

        if (rxq->rx_desc_count) {
                dev_printk(KERN_ERR, &mp->dev->dev,
                           "error freeing rx ring -- %d skbs stuck\n",
                           rxq->rx_desc_count);
        }

        if (rxq->index == 0 &&
            rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
                iounmap(rxq->rx_desc_area);
        else
                dma_free_coherent(NULL, rxq->rx_desc_area_size,
                                  rxq->rx_desc_area, rxq->rx_desc_dma);

        kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
        struct tx_queue *txq = mp->txq + index;
        struct tx_desc *tx_desc;
        int size;
        int i;

        txq->index = index;

        txq->tx_ring_size = mp->default_tx_ring_size;

        txq->tx_desc_count = 0;
        txq->tx_curr_desc = 0;
        txq->tx_used_desc = 0;

        size = txq->tx_ring_size * sizeof(struct tx_desc);

        if (index == 0 && size <= mp->tx_desc_sram_size) {
                txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
                                            mp->tx_desc_sram_size);
                txq->tx_desc_dma = mp->tx_desc_sram_addr;
        } else {
                txq->tx_desc_area = dma_alloc_coherent(NULL, size,
                                                       &txq->tx_desc_dma,
                                                       GFP_KERNEL);
        }

        if (txq->tx_desc_area == NULL) {
                dev_printk(KERN_ERR, &mp->dev->dev,
                           "can't allocate tx ring (%d bytes)\n", size);
                return -ENOMEM;
        }
        memset(txq->tx_desc_area, 0, size);

        txq->tx_desc_area_size = size;

        tx_desc = (struct tx_desc *)txq->tx_desc_area;
        for (i = 0; i < txq->tx_ring_size; i++) {
                struct tx_desc *txd = tx_desc + i;
                int nexti;

                nexti = i + 1;
                if (nexti == txq->tx_ring_size)
                        nexti = 0;

                txd->cmd_sts = 0;
                txd->next_desc_ptr = txq->tx_desc_dma +
                                     nexti * sizeof(struct tx_desc);
        }

        skb_queue_head_init(&txq->tx_skb);

        return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);

        txq_disable(txq);
        txq_reclaim(txq, txq->tx_ring_size, 1);

        BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

        if (txq->index == 0 &&
            txq->tx_desc_area_size <= mp->tx_desc_sram_size)
                iounmap(txq->tx_desc_area);
        else
                dma_free_coherent(NULL, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
}

/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
        u32 int_cause;
        u32 int_cause_ext;

        int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
        if (int_cause == 0)
                return 0;

        int_cause_ext = 0;
        if (int_cause & INT_EXT)
                int_cause_ext = rdlp(mp, INT_CAUSE_EXT);

        int_cause &= INT_TX_END | INT_RX;
        if (int_cause) {
                wrlp(mp, INT_CAUSE, ~int_cause);
                mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
                                   ~(rdlp(mp, TXQ_COMMAND) & 0xff);
                mp->work_rx |= (int_cause & INT_RX) >> 2;
        }

        int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
        if (int_cause_ext) {
                wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
                if (int_cause_ext & INT_EXT_LINK_PHY)
                        mp->work_link = 1;
                mp->work_tx |= int_cause_ext & INT_EXT_TX;
        }

        return 1;
}
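
/*
 * Reading of the shifts above (deduced from the register layout this
 * driver uses, not spelled out here): the per-queue RX cause bits
 * start at bit 2 and the TX-end bits at bit 19, so ">> 2" and
 * ">> 19" turn them into one-bit-per-queue work bitmaps.  Masking
 * with ~TXQ_COMMAND appears to record "TX end" work only for queues
 * whose enable bit has already dropped.
 */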

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (unlikely(!mv643xx_eth_collect_events(mp)))
                return IRQ_NONE;

        wrlp(mp, INT_MASK, 0);
        napi_schedule(&mp->napi);

        return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
        struct net_device *dev = mp->dev;
        u32 port_status;
        int speed;
        int duplex;
        int fc;

        port_status = rdlp(mp, PORT_STATUS);
        if (!(port_status & LINK_UP)) {
                if (netif_carrier_ok(dev)) {
                        int i;

                        printk(KERN_INFO "%s: link down\n", dev->name);

                        netif_carrier_off(dev);

                        for (i = 0; i < mp->txq_count; i++) {
                                struct tx_queue *txq = mp->txq + i;

                                txq_reclaim(txq, txq->tx_ring_size, 1);
                                txq_reset_hw_ptr(txq);
                        }
                }
                return;
        }

        switch (port_status & PORT_SPEED_MASK) {
        case PORT_SPEED_10:
                speed = 10;
                break;
        case PORT_SPEED_100:
                speed = 100;
                break;
        case PORT_SPEED_1000:
                speed = 1000;
                break;
        default:
                speed = -1;
                break;
        }
        duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
        fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

        printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
                         "flow control %sabled\n", dev->name,
                         speed, duplex ? "full" : "half",
                         fc ? "en" : "dis");

        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
}

static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
        struct mv643xx_eth_private *mp;
        int work_done;

        mp = container_of(napi, struct mv643xx_eth_private, napi);

        mp->work_rx_refill |= mp->work_rx_oom;
        mp->work_rx_oom = 0;

        work_done = 0;
        while (work_done < budget) {
                u8 queue_mask;
                int queue;
                int work_tbd;

                if (mp->work_link) {
                        mp->work_link = 0;
                        handle_link_event(mp);
                        continue;
                }

                queue_mask = mp->work_tx | mp->work_tx_end |
                             mp->work_rx | mp->work_rx_refill;
                if (!queue_mask) {
                        if (mv643xx_eth_collect_events(mp))
                                continue;
                        break;
                }

                queue = fls(queue_mask) - 1;
                queue_mask = 1 << queue;

                work_tbd = budget - work_done;
                if (work_tbd > 16)
                        work_tbd = 16;

                if (mp->work_tx_end & queue_mask) {
                        txq_kick(mp->txq + queue);
                } else if (mp->work_tx & queue_mask) {
                        work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
                        txq_maybe_wake(mp->txq + queue);
                } else if (mp->work_rx & queue_mask) {
                        work_done += rxq_process(mp->rxq + queue, work_tbd);
                } else if (mp->work_rx_refill & queue_mask) {
                        work_done += rxq_refill(mp->rxq + queue, work_tbd);
                } else {
                        BUG();
                }
        }

        if (work_done < budget) {
                if (mp->work_rx_oom)
                        mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
                napi_complete(napi);
                wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
        }

        return work_done;
}
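
/*
 * Note on the scheduling above: fls(queue_mask) - 1 services the
 * highest-numbered queue with pending work first, and the if/else
 * ladder orders work types as tx_end, tx reclaim, rx, rx refill.
 * Work is consumed in batches of at most 16 descriptors so a single
 * queue cannot monopolise the NAPI budget.
 */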

static inline void oom_timer_wrapper(unsigned long data)
{
        struct mv643xx_eth_private *mp = (void *)data;

        napi_schedule(&mp->napi);
}

static void phy_reset(struct mv643xx_eth_private *mp)
{
        int data;

        data = phy_read(mp->phy, MII_BMCR);
        if (data < 0)
                return;

        data |= BMCR_RESET;
        if (phy_write(mp->phy, MII_BMCR, data) < 0)
                return;

        do {
                data = phy_read(mp->phy, MII_BMCR);
        } while (data >= 0 && data & BMCR_RESET);
}

static void port_start(struct mv643xx_eth_private *mp)
{
        u32 pscr;
        int i;

        /*
         * Perform PHY reset, if there is a PHY.
         */
        if (mp->phy != NULL) {
                struct ethtool_cmd cmd;

                mv643xx_eth_get_settings(mp->dev, &cmd);
                phy_reset(mp);
                mv643xx_eth_set_settings(mp->dev, &cmd);
        }

        /*
         * Configure basic link parameters.
         */
        pscr = rdlp(mp, PORT_SERIAL_CONTROL);

        pscr |= SERIAL_PORT_ENABLE;
        wrlp(mp, PORT_SERIAL_CONTROL, pscr);

        pscr |= DO_NOT_FORCE_LINK_FAIL;
        if (mp->phy == NULL)
                pscr |= FORCE_LINK_PASS;
        wrlp(mp, PORT_SERIAL_CONTROL, pscr);

        wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

        /*
         * Configure TX path and queues.
         */
        tx_set_rate(mp, 1000000000, 16777216);
        for (i = 0; i < mp->txq_count; i++) {
                struct tx_queue *txq = mp->txq + i;

                txq_reset_hw_ptr(txq);
                txq_set_rate(txq, 1000000000, 16777216);
                txq_set_fixed_prio_mode(txq);
        }

        /*
         * Add configured unicast address to address filter table.
         */
        mv643xx_eth_program_unicast_filter(mp->dev);

        /*
         * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
         * frames to RX queue #0, and include the pseudo-header when
         * calculating receive checksums.
         */
        wrlp(mp, PORT_CONFIG, 0x02000000);

        /*
         * Treat BPDUs as normal multicasts, and disable partition mode.
         */
        wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

        /*
         * Enable the receive queues.
         */
        for (i = 0; i < mp->rxq_count; i++) {
                struct rx_queue *rxq = mp->rxq + i;
                u32 addr;

                addr = (u32)rxq->rx_desc_dma;
                addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
                wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

                rxq_enable(rxq);
        }
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
        unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
        u32 val;

        val = rdlp(mp, SDMA_CONFIG);
        if (mp->shared->extended_rx_coal_limit) {
                if (coal > 0xffff)
                        coal = 0xffff;
                val &= ~0x023fff80;
                val |= (coal & 0x8000) << 10;
                val |= (coal & 0x7fff) << 7;
        } else {
                if (coal > 0x3fff)
                        coal = 0x3fff;
                val &= ~0x003fff00;
                val |= (coal & 0x3fff) << 8;
        }
        wrlp(mp, SDMA_CONFIG, val);
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
        unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

        if (coal > 0x3fff)
                coal = 0x3fff;
        wrlp(mp, TX_FIFO_URGENT_THRESHOLD, (coal & 0x3fff) << 4);
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
        int skb_size;

        /*
         * Reserve 2+14 bytes for an ethernet header (the hardware
         * automatically prepends 2 bytes of dummy data to each
         * received packet), 16 bytes for up to four VLAN tags, and
         * 4 bytes for the trailing FCS -- 36 bytes total.
         */
        skb_size = mp->dev->mtu + 36;

        /*
         * Make sure that the skb size is a multiple of 8 bytes, as
         * the lower three bits of the receive descriptor's buffer
         * size field are ignored by the hardware.
         */
        mp->skb_size = (skb_size + 7) & ~7;
}
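
/*
 * Worked example: with the standard MTU of 1500, skb_size becomes
 * 1500 + 36 = 1536, already a multiple of 8.  An MTU of 1522 would
 * give 1558, rounded up to 1560 by the alignment step above.
 */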

static int mv643xx_eth_open(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int err;
        int i;

        wrlp(mp, INT_CAUSE, 0);
        wrlp(mp, INT_CAUSE_EXT, 0);
        rdlp(mp, INT_CAUSE_EXT);

        err = request_irq(dev->irq, mv643xx_eth_irq,
                          IRQF_SHARED, dev->name, dev);
        if (err) {
                dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
                return -EAGAIN;
        }

        mv643xx_eth_recalc_skb_size(mp);

        napi_enable(&mp->napi);

        skb_queue_head_init(&mp->rx_recycle);

        for (i = 0; i < mp->rxq_count; i++) {
                err = rxq_init(mp, i);
                if (err) {
                        while (--i >= 0)
                                rxq_deinit(mp->rxq + i);
                        goto out;
                }

                rxq_refill(mp->rxq + i, INT_MAX);
        }

        if (mp->work_rx_oom) {
                mp->rx_oom.expires = jiffies + (HZ / 10);
                add_timer(&mp->rx_oom);
        }

        for (i = 0; i < mp->txq_count; i++) {
                err = txq_init(mp, i);
                if (err) {
                        while (--i >= 0)
                                txq_deinit(mp->txq + i);
                        goto out_free;
                }
        }

        netif_carrier_off(dev);

        port_start(mp);

        set_rx_coal(mp, 0);
        set_tx_coal(mp, 0);

        wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
        wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);

        return 0;

out_free:
        for (i = 0; i < mp->rxq_count; i++)
                rxq_deinit(mp->rxq + i);
out:
        free_irq(dev->irq, dev);

        return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
        unsigned int data;
        int i;

        for (i = 0; i < mp->rxq_count; i++)
                rxq_disable(mp->rxq + i);
        for (i = 0; i < mp->txq_count; i++)
                txq_disable(mp->txq + i);

        while (1) {
                u32 ps = rdlp(mp, PORT_STATUS);

                if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
                        break;
                udelay(10);
        }

        /* Reset the Enable bit in the Configuration Register */
        data = rdlp(mp, PORT_SERIAL_CONTROL);
        data &= ~(SERIAL_PORT_ENABLE |
                  DO_NOT_FORCE_LINK_FAIL |
                  FORCE_LINK_PASS);
        wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int i;

        wrlp(mp, INT_MASK, 0x00000000);
        rdlp(mp, INT_MASK);

        del_timer_sync(&mp->mib_counters_timer);

        napi_disable(&mp->napi);

        del_timer_sync(&mp->rx_oom);

        netif_carrier_off(dev);

        free_irq(dev->irq, dev);

        port_reset(mp);
        mv643xx_eth_get_stats(dev);
        mib_counters_update(mp);

        skb_queue_purge(&mp->rx_recycle);

        for (i = 0; i < mp->rxq_count; i++)
                rxq_deinit(mp->rxq + i);
        for (i = 0; i < mp->txq_count; i++)
                txq_deinit(mp->txq + i);

        return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (mp->phy != NULL)
                return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);

        return -EOPNOTSUPP;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (new_mtu < 64 || new_mtu > 9500)
                return -EINVAL;

        dev->mtu = new_mtu;
        mv643xx_eth_recalc_skb_size(mp);
        tx_set_rate(mp, 1000000000, 16777216);

        if (!netif_running(dev))
                return 0;

        /*
         * Stop and then re-open the interface. This will allocate RX
         * skbs of the new MTU.
         * There is a possible danger that the open will not succeed,
         * due to memory being full.
         */
        mv643xx_eth_stop(dev);
        if (mv643xx_eth_open(dev)) {
                dev_printk(KERN_ERR, &dev->dev,
                           "fatal error on re-opening device after "
                           "MTU change\n");
        }

        return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
        struct mv643xx_eth_private *mp;

        mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
        if (netif_running(mp->dev)) {
                netif_tx_stop_all_queues(mp->dev);
                port_reset(mp);
                port_start(mp);
                netif_tx_wake_all_queues(mp->dev);
        }
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

        schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        wrlp(mp, INT_MASK, 0x00000000);
        rdlp(mp, INT_MASK);

        mv643xx_eth_irq(dev->irq, dev);

        wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
}
#endif

/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
                              struct mbus_dram_target_info *dram)
{
        void __iomem *base = msp->base;
        u32 win_enable;
        u32 win_protect;
        int i;

        for (i = 0; i < 6; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        win_enable = 0x3f;
        win_protect = 0;

        for (i = 0; i < dram->num_cs; i++) {
                struct mbus_dram_window *cs = dram->cs + i;

                writel((cs->base & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

                win_enable &= ~(1 << i);
                win_protect |= 3 << (2 * i);
        }

        writel(win_enable, base + WINDOW_BAR_ENABLE);
        msp->win_protect = win_protect;
}

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
        /*
         * Check whether we have a 14-bit coal limit field in bits
         * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
         * SDMA config register.
         */
        writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
        if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
                msp->extended_rx_coal_limit = 1;
        else
                msp->extended_rx_coal_limit = 0;

        /*
         * Check whether the MAC supports TX rate control, and if
         * yes, whether its associated registers are in the old or
         * the new place.
         */
        writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
        if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
                msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
        } else {
                writel(7, msp->base + 0x0400 + TX_BW_RATE);
                if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
                        msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
                else
                        msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
        }
}
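
/*
 * Note: both probes rely on the same write-then-read-back trick; a
 * feature bit that is not implemented reads back as zero, so writing
 * a distinguishing value and re-reading it tells old and new silicon
 * apart without needing a version register.
 */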

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
        static int mv643xx_eth_version_printed;
        struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
        struct mv643xx_eth_shared_private *msp;
        struct resource *res;
        int ret;

        if (!mv643xx_eth_version_printed++)
                printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
                       "driver version %s\n", mv643xx_eth_driver_version);

        ret = -EINVAL;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL)
                goto out;

        ret = -ENOMEM;
        msp = kmalloc(sizeof(*msp), GFP_KERNEL);
        if (msp == NULL)
                goto out;
        memset(msp, 0, sizeof(*msp));

        msp->base = ioremap(res->start, res->end - res->start + 1);
        if (msp->base == NULL)
                goto out_free;

        /*
         * Set up and register SMI bus.
         */
        if (pd == NULL || pd->shared_smi == NULL) {
                msp->smi_bus = mdiobus_alloc();
                if (msp->smi_bus == NULL)
                        goto out_unmap;

                msp->smi_bus->priv = msp;
                msp->smi_bus->name = "mv643xx_eth smi";
                msp->smi_bus->read = smi_bus_read;
                msp->smi_bus->write = smi_bus_write;
                snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
                msp->smi_bus->parent = &pdev->dev;
                msp->smi_bus->phy_mask = 0xffffffff;
                if (mdiobus_register(msp->smi_bus) < 0)
                        goto out_free_mii_bus;
                msp->smi = msp;
        } else {
                msp->smi = platform_get_drvdata(pd->shared_smi);
        }

        msp->err_interrupt = NO_IRQ;
        init_waitqueue_head(&msp->smi_busy_wait);

        /*
         * Check whether the error interrupt is hooked up.
         */
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (res != NULL) {
                int err;

                err = request_irq(res->start, mv643xx_eth_err_irq,
                                  IRQF_SHARED, "mv643xx_eth", msp);
                if (!err) {
                        writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
                        msp->err_interrupt = res->start;
                }
        }

        /*
         * (Re-)program MBUS remapping windows if we are asked to.
         */
        if (pd != NULL && pd->dram != NULL)
                mv643xx_eth_conf_mbus_windows(msp, pd->dram);

        /*
         * Detect hardware parameters.
         */
        msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
        infer_hw_params(msp);

        platform_set_drvdata(pdev, msp);

        return 0;

out_free_mii_bus:
        mdiobus_free(msp->smi_bus);
out_unmap:
        iounmap(msp->base);
out_free:
        kfree(msp);
out:
        return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
        struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
        struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

        if (pd == NULL || pd->shared_smi == NULL) {
                /* unregister the bus before freeing it */
                mdiobus_unregister(msp->smi_bus);
                mdiobus_free(msp->smi_bus);
        }
        if (msp->err_interrupt != NO_IRQ)
                free_irq(msp->err_interrupt, msp);
        iounmap(msp->base);
        kfree(msp);

        return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
        .probe = mv643xx_eth_shared_probe,
        .remove = mv643xx_eth_shared_remove,
        .driver = {
                .name = MV643XX_ETH_SHARED_NAME,
                .owner = THIS_MODULE,
        },
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
        int addr_shift = 5 * mp->port_num;
        u32 data;

        data = rdl(mp, PHY_ADDR);
        data &= ~(0x1f << addr_shift);
        data |= (phy_addr & 0x1f) << addr_shift;
        wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
        unsigned int data;

        data = rdl(mp, PHY_ADDR);

        return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
                       struct mv643xx_eth_platform_data *pd)
{
        struct net_device *dev = mp->dev;

        if (is_valid_ether_addr(pd->mac_addr))
                memcpy(dev->dev_addr, pd->mac_addr, 6);
        else
                uc_addr_get(mp, dev->dev_addr);

        mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
        if (pd->rx_queue_size)
                mp->default_rx_ring_size = pd->rx_queue_size;
        mp->rx_desc_sram_addr = pd->rx_sram_addr;
        mp->rx_desc_sram_size = pd->rx_sram_size;

        mp->rxq_count = pd->rx_queue_count ? : 1;

        mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
        if (pd->tx_queue_size)
                mp->default_tx_ring_size = pd->tx_queue_size;
        mp->tx_desc_sram_addr = pd->tx_sram_addr;
        mp->tx_desc_sram_size = pd->tx_sram_size;

        mp->txq_count = pd->tx_queue_count ? : 1;
}

static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
                                   int phy_addr)
{
        struct mii_bus *bus = mp->shared->smi->smi_bus;
        struct phy_device *phydev;
        int start;
        int num;
        int i;

        if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
                start = phy_addr_get(mp) & 0x1f;
                num = 32;
        } else {
                start = phy_addr & 0x1f;
                num = 1;
        }

        phydev = NULL;
        for (i = 0; i < num; i++) {
                int addr = (start + i) & 0x1f;

                if (bus->phy_map[addr] == NULL)
                        mdiobus_scan(bus, addr);

                if (phydev == NULL) {
                        phydev = bus->phy_map[addr];
                        if (phydev != NULL)
                                phy_addr_set(mp, addr);
                }
        }

        return phydev;
}
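
/*
 * Note: with MV643XX_ETH_PHY_ADDR_DEFAULT the scan starts at the
 * address currently latched in the PHY_ADDR register and wraps
 * through all 32 MDIO addresses, latching the first PHY it finds;
 * otherwise only the single given address is probed.
 */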

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
        struct phy_device *phy = mp->phy;

        phy_reset(mp);

        phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);

        if (speed == 0) {
                phy->autoneg = AUTONEG_ENABLE;
                phy->speed = 0;
                phy->duplex = 0;
                phy->advertising = phy->supported | ADVERTISED_Autoneg;
        } else {
                phy->autoneg = AUTONEG_DISABLE;
                phy->advertising = 0;
                phy->speed = speed;
                phy->duplex = duplex;
        }
        phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
        u32 pscr;

        pscr = rdlp(mp, PORT_SERIAL_CONTROL);
        if (pscr & SERIAL_PORT_ENABLE) {
                pscr &= ~SERIAL_PORT_ENABLE;
                wrlp(mp, PORT_SERIAL_CONTROL, pscr);
        }

        pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
        if (mp->phy == NULL) {
                pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
                if (speed == SPEED_1000)
                        pscr |= SET_GMII_SPEED_TO_1000;
                else if (speed == SPEED_100)
                        pscr |= SET_MII_SPEED_TO_100;

                pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

                pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
                if (duplex == DUPLEX_FULL)
                        pscr |= SET_FULL_DUPLEX_MODE;
        }

        wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static int mv643xx_eth_probe(struct platform_device *pdev)
{
        struct mv643xx_eth_platform_data *pd;
        struct mv643xx_eth_private *mp;
        struct net_device *dev;
        struct resource *res;
        int err;

        pd = pdev->dev.platform_data;
        if (pd == NULL) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "no mv643xx_eth_platform_data\n");
                return -ENODEV;
        }

        if (pd->shared == NULL) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "no mv643xx_eth_platform_data->shared\n");
                return -ENODEV;
        }

        dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
        if (!dev)
                return -ENOMEM;

        mp = netdev_priv(dev);
        platform_set_drvdata(pdev, mp);

        mp->shared = platform_get_drvdata(pd->shared);
        mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
        mp->port_num = pd->port_number;

        mp->dev = dev;

        set_params(mp, pd);
        dev->real_num_tx_queues = mp->txq_count;

        if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
                mp->phy = phy_scan(mp, pd->phy_addr);

        if (mp->phy != NULL) {
                phy_init(mp, pd->speed, pd->duplex);
                SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
        } else {
                SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
        }

        init_pscr(mp, pd->speed, pd->duplex);

        mib_counters_clear(mp);

        init_timer(&mp->mib_counters_timer);
        mp->mib_counters_timer.data = (unsigned long)mp;
        mp->mib_counters_timer.function = mib_counters_timer_wrapper;
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
        add_timer(&mp->mib_counters_timer);

        spin_lock_init(&mp->mib_counters_lock);

        INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

        netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

        init_timer(&mp->rx_oom);
        mp->rx_oom.data = (unsigned long)mp;
        mp->rx_oom.function = oom_timer_wrapper;

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        BUG_ON(!res);
        dev->irq = res->start;

        dev->get_stats = mv643xx_eth_get_stats;
        dev->hard_start_xmit = mv643xx_eth_xmit;
        dev->open = mv643xx_eth_open;
        dev->stop = mv643xx_eth_stop;
        dev->set_rx_mode = mv643xx_eth_set_rx_mode;
        dev->set_mac_address = mv643xx_eth_set_mac_address;
        dev->do_ioctl = mv643xx_eth_ioctl;
        dev->change_mtu = mv643xx_eth_change_mtu;
        dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = mv643xx_eth_netpoll;
#endif
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;

        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

        SET_NETDEV_DEV(dev, &pdev->dev);

        if (mp->shared->win_protect)
                wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

        err = register_netdev(dev);
        if (err)
                goto out;

        dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
                   mp->port_num, dev->dev_addr);

        if (mp->tx_desc_sram_size > 0)
                dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

        return 0;

out:
        free_netdev(dev);

        return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
        struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

        unregister_netdev(mp->dev);
        if (mp->phy != NULL)
                phy_detach(mp->phy);
        flush_scheduled_work();
        free_netdev(mp->dev);

        platform_set_drvdata(pdev, NULL);

        return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
        struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

        /* Mask all interrupts on ethernet port */
        wrlp(mp, INT_MASK, 0);
        rdlp(mp, INT_MASK);

        if (netif_running(mp->dev))
                port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
        .probe = mv643xx_eth_probe,
        .remove = mv643xx_eth_remove,
        .shutdown = mv643xx_eth_shutdown,
        .driver = {
                .name = MV643XX_ETH_NAME,
                .owner = THIS_MODULE,
        },
};

static int __init mv643xx_eth_init_module(void)
{
        int rc;

        rc = platform_driver_register(&mv643xx_eth_shared_driver);
        if (!rc) {
                rc = platform_driver_register(&mv643xx_eth_driver);
                if (rc)
                        platform_driver_unregister(&mv643xx_eth_shared_driver);
        }

        return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
        platform_driver_unregister(&mv643xx_eth_driver);
        platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
              "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);