
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define PORT_SERIAL_CONTROL1		0x004c
#define  CLK125_BYPASS_EN		0x00000010
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define RX_DISCARD_FRAME_CNT		0x0084
#define RX_OVERRUN_FRAME_CNT		0x0088
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)

#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)

#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
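
/*
 * Note (descriptive, inferred from the #if blocks above): the BLM_*_NO_SWAP
 * bits disable the SDMA engine's byte swapper.  Little-endian hosts set them
 * so descriptors and buffers are transferred unswapped; big-endian hosts
 * leave swapping enabled by keeping the bits clear.
 */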

/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11
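
/*
 * Descriptor ownership protocol (descriptive note): the driver hands a
 * descriptor to the hardware by setting BUFFER_OWNED_BY_DMA in cmd_sts,
 * after a wmb() so the rest of the descriptor is visible first, and the
 * hardware clears the bit when it is done with the descriptor.  The RX
 * and TX processing loops below poll this bit to decide when a descriptor
 * may be reclaimed.
 */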

/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
	struct clk *clk;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
	/* Non MIB hardware counters */
	u32 rx_discard;
	u32 rx_overrun;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];

	/*
	 * Hardware-specific parameters.
	 */
	struct clk *clk;
	unsigned int t_clk;
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);

	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		napi_gro_receive(&mp->napi, skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = netdev_alloc_skb(mp->dev, mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb_end_pointer(skb) - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}
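
/*
 * Example (descriptive note): a 4-byte fragment starting at page offset 3
 * is both <= 8 bytes long and not 8-byte aligned, so the function above
 * returns 1 and mv643xx_eth_xmit() will linearize the skb before queueing
 * it, presumably because the DMA engine cannot handle such tiny unaligned
 * buffers directly.
 */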

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 this_frag, 0,
						 skb_frag_size(this_frag),
						 DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;
		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	length = skb->len;

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += length;
		txq->tx_packets++;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock_bh(nq);

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		dev_kfree_skb(skb);
	}

	__netif_tx_unlock_bh(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
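/*
 * Worked example (illustrative; assumes t_clk = 166 MHz): for rate =
 * 100 Mbit/s, token_rate = (100000000 / 1000) * 64 / (166000000 / 1000)
 * = 6400000 / 166000 ~= 38, well within the 10-bit limit of 1023.
 */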
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
{
	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	u32 autoneg_disable = FORCE_LINK_PASS |
			      DISABLE_AUTO_NEG_SPEED_GMII |
			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
			      DISABLE_AUTO_NEG_FOR_DUPLEX;

	if (mp->phy->autoneg == AUTONEG_ENABLE) {
		/* enable auto negotiation */
		pscr &= ~autoneg_disable;
		goto out_write;
	}

	pscr |= autoneg_disable;

	if (mp->phy->speed == SPEED_1000) {
		/* force gigabit, half duplex not supported */
		pscr |= SET_GMII_SPEED_TO_1000;
		pscr |= SET_FULL_DUPLEX_MODE;
		goto out_write;
	}

	pscr &= ~SET_GMII_SPEED_TO_1000;

	if (mp->phy->speed == SPEED_100)
		pscr |= SET_MII_SPEED_TO_100;
	else
		pscr &= ~SET_MII_SPEED_TO_100;

	if (mp->phy->duplex == DUPLEX_FULL)
		pscr |= SET_FULL_DUPLEX_MODE;
	else
		pscr &= ~SET_FULL_DUPLEX_MODE;

out_write:
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);

	/* Clear non MIB hw counters also */
	rdlp(mp, RX_DISCARD_FRAME_CNT);
	rdlp(mp, RX_OVERRUN_FRAME_CNT);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);

	/* Non MIB hardware counters */
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
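/*
 * Worked example (illustrative; assumes t_clk = 166 MHz): a requested
 * delay of 100 usec gives register_value = 100 * 166000000 / 64000000
 * ~= 259, i.e. roughly 259 * 64 = 16576 t_clk cycles.
 */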
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	MIBSTAT(rx_discard),
	MIBSTAT(rx_overrun),
};
  1068. static int
  1069. mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
  1070. struct ethtool_cmd *cmd)
  1071. {
  1072. int err;
  1073. err = phy_read_status(mp->phy);
  1074. if (err == 0)
  1075. err = phy_ethtool_gset(mp->phy, cmd);
  1076. /*
  1077. * The MAC does not support 1000baseT_Half.
  1078. */
  1079. cmd->supported &= ~SUPPORTED_1000baseT_Half;
  1080. cmd->advertising &= ~ADVERTISED_1000baseT_Half;
  1081. return err;
  1082. }
  1083. static int
  1084. mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
  1085. struct ethtool_cmd *cmd)
  1086. {
  1087. u32 port_status;
  1088. port_status = rdlp(mp, PORT_STATUS);
  1089. cmd->supported = SUPPORTED_MII;
  1090. cmd->advertising = ADVERTISED_MII;
  1091. switch (port_status & PORT_SPEED_MASK) {
  1092. case PORT_SPEED_10:
  1093. ethtool_cmd_speed_set(cmd, SPEED_10);
  1094. break;
  1095. case PORT_SPEED_100:
  1096. ethtool_cmd_speed_set(cmd, SPEED_100);
  1097. break;
  1098. case PORT_SPEED_1000:
  1099. ethtool_cmd_speed_set(cmd, SPEED_1000);
  1100. break;
  1101. default:
  1102. cmd->speed = -1;
  1103. break;
  1104. }
  1105. cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
  1106. cmd->port = PORT_MII;
  1107. cmd->phy_address = 0;
  1108. cmd->transceiver = XCVR_INTERNAL;
  1109. cmd->autoneg = AUTONEG_DISABLE;
  1110. cmd->maxtxpkt = 1;
  1111. cmd->maxrxpkt = 1;
  1112. return 0;
  1113. }
  1114. static void
  1115. mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  1116. {
  1117. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1118. wol->supported = 0;
  1119. wol->wolopts = 0;
  1120. if (mp->phy)
  1121. phy_ethtool_get_wol(mp->phy, wol);
  1122. }
  1123. static int
  1124. mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  1125. {
  1126. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1127. int err;
  1128. if (mp->phy == NULL)
  1129. return -EOPNOTSUPP;
  1130. err = phy_ethtool_set_wol(mp->phy, wol);
  1131. /* Given that mv643xx_eth works without the marvell-specific PHY driver,
  1132. * this debugging hint is useful to have.
  1133. */
  1134. if (err == -EOPNOTSUPP)
  1135. netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
  1136. return err;
  1137. }
  1138. static int
  1139. mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1140. {
  1141. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1142. if (mp->phy != NULL)
  1143. return mv643xx_eth_get_settings_phy(mp, cmd);
  1144. else
  1145. return mv643xx_eth_get_settings_phyless(mp, cmd);
  1146. }
  1147. static int
  1148. mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1149. {
  1150. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1151. int ret;
  1152. if (mp->phy == NULL)
  1153. return -EINVAL;
  1154. /*
  1155. * The MAC does not support 1000baseT_Half.
  1156. */
  1157. cmd->advertising &= ~ADVERTISED_1000baseT_Half;
  1158. ret = phy_ethtool_sset(mp->phy, cmd);
  1159. if (!ret)
  1160. mv643xx_adjust_pscr(mp);
  1161. return ret;
  1162. }
  1163. static void mv643xx_eth_get_drvinfo(struct net_device *dev,
  1164. struct ethtool_drvinfo *drvinfo)
  1165. {
  1166. strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
  1167. sizeof(drvinfo->driver));
  1168. strlcpy(drvinfo->version, mv643xx_eth_driver_version,
  1169. sizeof(drvinfo->version));
  1170. strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
  1171. strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
  1172. drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
  1173. }
  1174. static int mv643xx_eth_nway_reset(struct net_device *dev)
  1175. {
  1176. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1177. if (mp->phy == NULL)
  1178. return -EINVAL;
  1179. return genphy_restart_aneg(mp->phy);
  1180. }
  1181. static int
  1182. mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
  1183. {
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        ec->rx_coalesce_usecs = get_rx_coal(mp);
        ec->tx_coalesce_usecs = get_tx_coal(mp);

        return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        set_rx_coal(mp, ec->rx_coalesce_usecs);
        set_tx_coal(mp, ec->tx_coalesce_usecs);

        return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        er->rx_max_pending = 4096;
        er->tx_max_pending = 4096;

        er->rx_pending = mp->rx_ring_size;
        er->tx_pending = mp->tx_ring_size;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (er->rx_mini_pending || er->rx_jumbo_pending)
                return -EINVAL;

        mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
        mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

        if (netif_running(dev)) {
                mv643xx_eth_stop(dev);
                if (mv643xx_eth_open(dev)) {
                        netdev_err(dev,
                                   "fatal error on re-opening device after ring param change\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

static int
mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        bool rx_csum = features & NETIF_F_RXCSUM;

        wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

        return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
                                    uint32_t stringset, uint8_t *data)
{
        int i;

        if (stringset == ETH_SS_STATS) {
                for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               mv643xx_eth_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
        }
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
                                          struct ethtool_stats *stats,
                                          uint64_t *data)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int i;

        mv643xx_eth_get_stats(dev);
        mib_counters_update(mp);

        for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
                const struct mv643xx_eth_stats *stat;
                void *p;

                stat = mv643xx_eth_stats + i;

                if (stat->netdev_off >= 0)
                        p = ((void *)mp->dev) + stat->netdev_off;
                else
                        p = ((void *)mp) + stat->mp_off;

                data[i] = (stat->sizeof_stat == 8) ?
                                *(uint64_t *)p : *(uint32_t *)p;
        }
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
        if (sset == ETH_SS_STATS)
                return ARRAY_SIZE(mv643xx_eth_stats);

        return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
        .get_settings           = mv643xx_eth_get_settings,
        .set_settings           = mv643xx_eth_set_settings,
        .get_drvinfo            = mv643xx_eth_get_drvinfo,
        .nway_reset             = mv643xx_eth_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_coalesce           = mv643xx_eth_get_coalesce,
        .set_coalesce           = mv643xx_eth_set_coalesce,
        .get_ringparam          = mv643xx_eth_get_ringparam,
        .set_ringparam          = mv643xx_eth_set_ringparam,
        .get_strings            = mv643xx_eth_get_strings,
        .get_ethtool_stats      = mv643xx_eth_get_ethtool_stats,
        .get_sset_count         = mv643xx_eth_get_sset_count,
        .get_ts_info            = ethtool_op_get_ts_info,
        .get_wol                = mv643xx_eth_get_wol,
        .set_wol                = mv643xx_eth_set_wol,
};
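
/*
 * These ops back the standard ethtool interface.  As a usage sketch
 * (interface name illustrative, assuming the port probed as eth0):
 *
 *      ethtool -C eth0 rx-usecs 100    # -> mv643xx_eth_set_coalesce()
 *      ethtool -G eth0 rx 512 tx 512   # -> mv643xx_eth_set_ringparam()
 *
 * Note that changing ring parameters on a running interface restarts
 * it, so a failed re-open can leave the device down.
 */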

/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
        unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
        unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

        addr[0] = (mac_h >> 24) & 0xff;
        addr[1] = (mac_h >> 16) & 0xff;
        addr[2] = (mac_h >> 8) & 0xff;
        addr[3] = mac_h & 0xff;
        addr[4] = (mac_l >> 8) & 0xff;
        addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
        wrlp(mp, MAC_ADDR_HIGH,
             (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
        wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        u32 nibbles;

        if (dev->flags & IFF_PROMISC)
                return 0;

        nibbles = 1 << (dev->dev_addr[5] & 0x0f);
        netdev_for_each_uc_addr(ha, dev) {
                if (memcmp(dev->dev_addr, ha->addr, 5))
                        return 0;
                if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
                        return 0;

                nibbles |= 1 << (ha->addr[5] & 0x0f);
        }

        return nibbles;
}
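
/*
 * Worked example of the mask above: the hardware's unicast filter can
 * only discriminate on the low nibble of the last address byte, so
 * every extra unicast address must match dev_addr in its first five
 * bytes and in the high nibble of byte 5.  With dev_addr ending in
 * 0xa3 and a secondary address ending in 0xa7, the returned mask is
 * (1 << 3) | (1 << 7); any address outside that 16-address window
 * forces a return of 0, which makes the caller fall back to unicast
 * promiscuous mode.
 */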

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 port_config;
        u32 nibbles;
        int i;

        uc_addr_set(mp, dev->dev_addr);

        port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

        nibbles = uc_addr_filter_mask(dev);
        if (!nibbles) {
                port_config |= UNICAST_PROMISCUOUS_MODE;
                nibbles = 0xffff;
        }

        for (i = 0; i < 16; i += 4) {
                int off = UNICAST_TABLE(mp->port_num) + i;
                u32 v;

                v = 0;
                if (nibbles & 1)
                        v |= 0x00000001;
                if (nibbles & 2)
                        v |= 0x00000100;
                if (nibbles & 4)
                        v |= 0x00010000;
                if (nibbles & 8)
                        v |= 0x01000000;
                nibbles >>= 4;

                wrl(mp, off, v);
        }

        wrlp(mp, PORT_CONFIG, port_config);
}

static int addr_crc(unsigned char *addr)
{
        int crc = 0;
        int i;

        for (i = 0; i < 6; i++) {
                int j;

                crc = (crc ^ addr[i]) << 8;
                for (j = 7; j >= 0; j--) {
                        if (crc & (0x100 << j))
                                crc ^= 0x107 << j;
                }
        }

        return crc;
}
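
/*
 * addr_crc() is a bitwise CRC-8 over the six address bytes using the
 * polynomial x^8 + x^2 + x + 1 (0x107).  The 8-bit result indexes one
 * of the 256 entries of the "other" multicast table below: entry >> 2
 * selects one of 64 32-bit words, and 8 * (entry & 3) selects the
 * byte lane within that word.
 */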

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 *mc_spec;
        u32 *mc_other;
        struct netdev_hw_addr *ha;
        int i;

        if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                int port_num;
                u32 accept;

oom:
                port_num = mp->port_num;
                accept = 0x01010101;
                for (i = 0; i < 0x100; i += 4) {
                        wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
                        wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
                }
                return;
        }

        mc_spec = kmalloc(0x200, GFP_ATOMIC);
        if (mc_spec == NULL)
                goto oom;
        mc_other = mc_spec + (0x100 >> 2);

        memset(mc_spec, 0, 0x100);
        memset(mc_other, 0, 0x100);

        netdev_for_each_mc_addr(ha, dev) {
                u8 *a = ha->addr;
                u32 *table;
                int entry;

                if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
                        table = mc_spec;
                        entry = a[5];
                } else {
                        table = mc_other;
                        entry = addr_crc(a);
                }

                table[entry >> 2] |= 1 << (8 * (entry & 3));
        }

        for (i = 0; i < 0x100; i += 4) {
                wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
                wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
        }

        kfree(mc_spec);
}
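
/*
 * The split between the two tables follows the hardware: addresses of
 * the form 01:00:5e:00:00:xx (the MAC mapping of IPv4 multicast
 * 224.0.0.0/24) are matched exactly by the "special" table, indexed
 * by the last address byte, while every other multicast address is
 * hashed through addr_crc() into the "other" table.  An allocation
 * failure for the temporary buffers falls back to accepting all
 * multicasts via the 0x01010101 fill, trading filtering accuracy for
 * continued operation.
 */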

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
        mv643xx_eth_program_unicast_filter(dev);
        mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

        netif_addr_lock_bh(dev);
        mv643xx_eth_program_unicast_filter(dev);
        netif_addr_unlock_bh(dev);

        return 0;
}

/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
        struct rx_queue *rxq = mp->rxq + index;
        struct rx_desc *rx_desc;
        int size;
        int i;

        rxq->index = index;

        rxq->rx_ring_size = mp->rx_ring_size;

        rxq->rx_desc_count = 0;
        rxq->rx_curr_desc = 0;
        rxq->rx_used_desc = 0;

        size = rxq->rx_ring_size * sizeof(struct rx_desc);

        if (index == 0 && size <= mp->rx_desc_sram_size) {
                rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
                                            mp->rx_desc_sram_size);
                rxq->rx_desc_dma = mp->rx_desc_sram_addr;
        } else {
                rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
                                                       size, &rxq->rx_desc_dma,
                                                       GFP_KERNEL);
        }

        if (rxq->rx_desc_area == NULL) {
                netdev_err(mp->dev,
                           "can't allocate rx ring (%d bytes)\n", size);
                goto out;
        }
        memset(rxq->rx_desc_area, 0, size);

        rxq->rx_desc_area_size = size;
        rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
                              GFP_KERNEL);
        if (rxq->rx_skb == NULL)
                goto out_free;

        rx_desc = rxq->rx_desc_area;
        for (i = 0; i < rxq->rx_ring_size; i++) {
                int nexti;

                nexti = i + 1;
                if (nexti == rxq->rx_ring_size)
                        nexti = 0;

                rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
                                        nexti * sizeof(struct rx_desc);
        }

        return 0;

out_free:
        if (index == 0 && size <= mp->rx_desc_sram_size)
                iounmap(rxq->rx_desc_area);
        else
                dma_free_coherent(mp->dev->dev.parent, size,
                                  rxq->rx_desc_area,
                                  rxq->rx_desc_dma);

out:
        return -ENOMEM;
}
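
/*
 * Each descriptor's next_desc_ptr links to its successor and the last
 * one wraps back to the first, so the hardware sees the ring as a
 * circular list in DMA-able memory.  Queue 0 may alternatively live
 * in on-chip SRAM when a small enough region was configured, which is
 * why setup and teardown both carry an ioremap/dma_alloc_coherent
 * split.
 */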

static void rxq_deinit(struct rx_queue *rxq)
{
        struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
        int i;

        rxq_disable(rxq);

        for (i = 0; i < rxq->rx_ring_size; i++) {
                if (rxq->rx_skb[i]) {
                        dev_kfree_skb(rxq->rx_skb[i]);
                        rxq->rx_desc_count--;
                }
        }

        if (rxq->rx_desc_count) {
                netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
                           rxq->rx_desc_count);
        }

        if (rxq->index == 0 &&
            rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
                iounmap(rxq->rx_desc_area);
        else
                dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
                                  rxq->rx_desc_area, rxq->rx_desc_dma);

        kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
        struct tx_queue *txq = mp->txq + index;
        struct tx_desc *tx_desc;
        int size;
        int i;

        txq->index = index;

        txq->tx_ring_size = mp->tx_ring_size;

        txq->tx_desc_count = 0;
        txq->tx_curr_desc = 0;
        txq->tx_used_desc = 0;

        size = txq->tx_ring_size * sizeof(struct tx_desc);

        if (index == 0 && size <= mp->tx_desc_sram_size) {
                txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
                                            mp->tx_desc_sram_size);
                txq->tx_desc_dma = mp->tx_desc_sram_addr;
        } else {
                txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
                                                       size, &txq->tx_desc_dma,
                                                       GFP_KERNEL);
        }

        if (txq->tx_desc_area == NULL) {
                netdev_err(mp->dev,
                           "can't allocate tx ring (%d bytes)\n", size);
                return -ENOMEM;
        }
        memset(txq->tx_desc_area, 0, size);

        txq->tx_desc_area_size = size;

        tx_desc = txq->tx_desc_area;
        for (i = 0; i < txq->tx_ring_size; i++) {
                struct tx_desc *txd = tx_desc + i;
                int nexti;

                nexti = i + 1;
                if (nexti == txq->tx_ring_size)
                        nexti = 0;

                txd->cmd_sts = 0;
                txd->next_desc_ptr = txq->tx_desc_dma +
                                        nexti * sizeof(struct tx_desc);
        }

        skb_queue_head_init(&txq->tx_skb);

        return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
        struct mv643xx_eth_private *mp = txq_to_mp(txq);

        txq_disable(txq);
        txq_reclaim(txq, txq->tx_ring_size, 1);

        BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

        if (txq->index == 0 &&
            txq->tx_desc_area_size <= mp->tx_desc_sram_size)
                iounmap(txq->tx_desc_area);
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
}

/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
        u32 int_cause;
        u32 int_cause_ext;

        int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
        if (int_cause == 0)
                return 0;

        int_cause_ext = 0;
        if (int_cause & INT_EXT) {
                int_cause &= ~INT_EXT;
                int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
        }

        if (int_cause) {
                wrlp(mp, INT_CAUSE, ~int_cause);
                mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
                                ~(rdlp(mp, TXQ_COMMAND) & 0xff);
                mp->work_rx |= (int_cause & INT_RX) >> 2;
        }

        int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
        if (int_cause_ext) {
                wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
                if (int_cause_ext & INT_EXT_LINK_PHY)
                        mp->work_link = 1;
                mp->work_tx |= int_cause_ext & INT_EXT_TX;
        }

        return 1;
}
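
/*
 * The shifts above convert hardware cause bits into per-queue work
 * bitmaps: the INT_TX_END field is shifted down by 19 and the INT_RX
 * field by 2, leaving one bit per queue.  TX-end work is additionally
 * masked with the complement of TXQ_COMMAND so that only queues the
 * hardware has actually stopped get kicked later by the poll loop.
 */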

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (unlikely(!mv643xx_eth_collect_events(mp)))
                return IRQ_NONE;

        wrlp(mp, INT_MASK, 0);
        napi_schedule(&mp->napi);

        return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
        struct net_device *dev = mp->dev;
        u32 port_status;
        int speed;
        int duplex;
        int fc;

        port_status = rdlp(mp, PORT_STATUS);
        if (!(port_status & LINK_UP)) {
                if (netif_carrier_ok(dev)) {
                        int i;

                        netdev_info(dev, "link down\n");

                        netif_carrier_off(dev);

                        for (i = 0; i < mp->txq_count; i++) {
                                struct tx_queue *txq = mp->txq + i;

                                txq_reclaim(txq, txq->tx_ring_size, 1);
                                txq_reset_hw_ptr(txq);
                        }
                }
                return;
        }

        switch (port_status & PORT_SPEED_MASK) {
        case PORT_SPEED_10:
                speed = 10;
                break;
        case PORT_SPEED_100:
                speed = 100;
                break;
        case PORT_SPEED_1000:
                speed = 1000;
                break;
        default:
                speed = -1;
                break;
        }
        duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
        fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

        netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
                    speed, duplex ? "full" : "half", fc ? "en" : "dis");

        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
}

static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
        struct mv643xx_eth_private *mp;
        int work_done;

        mp = container_of(napi, struct mv643xx_eth_private, napi);

        if (unlikely(mp->oom)) {
                mp->oom = 0;
                del_timer(&mp->rx_oom);
        }

        work_done = 0;
        while (work_done < budget) {
                u8 queue_mask;
                int queue;
                int work_tbd;

                if (mp->work_link) {
                        mp->work_link = 0;
                        handle_link_event(mp);
                        work_done++;
                        continue;
                }

                queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
                if (likely(!mp->oom))
                        queue_mask |= mp->work_rx_refill;

                if (!queue_mask) {
                        if (mv643xx_eth_collect_events(mp))
                                continue;
                        break;
                }

                queue = fls(queue_mask) - 1;
                queue_mask = 1 << queue;

                work_tbd = budget - work_done;
                if (work_tbd > 16)
                        work_tbd = 16;

                if (mp->work_tx_end & queue_mask) {
                        txq_kick(mp->txq + queue);
                } else if (mp->work_tx & queue_mask) {
                        work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
                        txq_maybe_wake(mp->txq + queue);
                } else if (mp->work_rx & queue_mask) {
                        work_done += rxq_process(mp->rxq + queue, work_tbd);
                } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
                        work_done += rxq_refill(mp->rxq + queue, work_tbd);
                } else {
                        BUG();
                }
        }

        if (work_done < budget) {
                if (mp->oom)
                        mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
                napi_complete(napi);
                wrlp(mp, INT_MASK, mp->int_mask);
        }

        return work_done;
}
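
/*
 * Scheduling policy of the poll loop above, in brief: link events are
 * always handled first; otherwise fls() picks the highest-numbered
 * queue with pending work, and for that queue TX-end kicks take
 * precedence over TX reclaim, which takes precedence over RX
 * processing and then RX refill.  Work is done in slices of at most
 * 16 units so one busy source cannot consume the whole NAPI budget
 * before the others get a chance.
 */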

static inline void oom_timer_wrapper(unsigned long data)
{
        struct mv643xx_eth_private *mp = (void *)data;

        napi_schedule(&mp->napi);
}

static void phy_reset(struct mv643xx_eth_private *mp)
{
        int data;

        data = phy_read(mp->phy, MII_BMCR);
        if (data < 0)
                return;

        data |= BMCR_RESET;
        if (phy_write(mp->phy, MII_BMCR, data) < 0)
                return;

        do {
                data = phy_read(mp->phy, MII_BMCR);
        } while (data >= 0 && data & BMCR_RESET);
}
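
/*
 * BMCR_RESET is self-clearing per the 802.3 MII specification: the
 * PHY drops the bit once its internal reset completes, so the loop
 * above spins until the bit reads back as zero (or until a read
 * fails).
 */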

static void port_start(struct mv643xx_eth_private *mp)
{
        u32 pscr;
        int i;

        /*
         * Perform PHY reset, if there is a PHY.
         */
        if (mp->phy != NULL) {
                struct ethtool_cmd cmd;

                mv643xx_eth_get_settings(mp->dev, &cmd);
                phy_reset(mp);
                mv643xx_eth_set_settings(mp->dev, &cmd);
        }

        /*
         * Configure basic link parameters.
         */
        pscr = rdlp(mp, PORT_SERIAL_CONTROL);

        pscr |= SERIAL_PORT_ENABLE;
        wrlp(mp, PORT_SERIAL_CONTROL, pscr);

        pscr |= DO_NOT_FORCE_LINK_FAIL;
        if (mp->phy == NULL)
                pscr |= FORCE_LINK_PASS;
        wrlp(mp, PORT_SERIAL_CONTROL, pscr);

        /*
         * Configure TX path and queues.
         */
        tx_set_rate(mp, 1000000000, 16777216);
        for (i = 0; i < mp->txq_count; i++) {
                struct tx_queue *txq = mp->txq + i;

                txq_reset_hw_ptr(txq);
                txq_set_rate(txq, 1000000000, 16777216);
                txq_set_fixed_prio_mode(txq);
        }

        /*
         * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
         * frames to RX queue #0, and include the pseudo-header when
         * calculating receive checksums.
         */
        mv643xx_eth_set_features(mp->dev, mp->dev->features);

        /*
         * Treat BPDUs as normal multicasts, and disable partition mode.
         */
        wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

        /*
         * Add configured unicast addresses to address filter table.
         */
        mv643xx_eth_program_unicast_filter(mp->dev);

        /*
         * Enable the receive queues.
         */
        for (i = 0; i < mp->rxq_count; i++) {
                struct rx_queue *rxq = mp->rxq + i;
                u32 addr;

                addr = (u32)rxq->rx_desc_dma;
                addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
                wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

                rxq_enable(rxq);
        }
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
        int skb_size;

        /*
         * Reserve 2+14 bytes for an ethernet header (the hardware
         * automatically prepends 2 bytes of dummy data to each
         * received packet), 16 bytes for up to four VLAN tags, and
         * 4 bytes for the trailing FCS -- 36 bytes total.
         */
        skb_size = mp->dev->mtu + 36;

        /*
         * Make sure that the skb size is a multiple of 8 bytes, as
         * the lower three bits of the receive descriptor's buffer
         * size field are ignored by the hardware.
         */
        mp->skb_size = (skb_size + 7) & ~7;

        /*
         * If NET_SKB_PAD is smaller than a cache line,
         * netdev_alloc_skb() will cause skb->data to be misaligned
         * to a cache line boundary. If this is the case, include
         * some extra space to allow re-aligning the data area.
         */
        mp->skb_size += SKB_DMA_REALIGN;
}
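
/*
 * For example, at the default MTU of 1500 the overhead math works out
 * to skb_size = 1500 + 36 = 1536, which is already a multiple of 8,
 * so the rounding step is a no-op and only SKB_DMA_REALIGN (if
 * non-zero on this platform) is added on top.
 */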

static int mv643xx_eth_open(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int err;
        int i;

        wrlp(mp, INT_CAUSE, 0);
        wrlp(mp, INT_CAUSE_EXT, 0);
        rdlp(mp, INT_CAUSE_EXT);

        err = request_irq(dev->irq, mv643xx_eth_irq,
                          IRQF_SHARED, dev->name, dev);
        if (err) {
                netdev_err(dev, "can't assign irq\n");
                return -EAGAIN;
        }

        mv643xx_eth_recalc_skb_size(mp);

        napi_enable(&mp->napi);

        mp->int_mask = INT_EXT;

        for (i = 0; i < mp->rxq_count; i++) {
                err = rxq_init(mp, i);
                if (err) {
                        while (--i >= 0)
                                rxq_deinit(mp->rxq + i);
                        goto out;
                }

                rxq_refill(mp->rxq + i, INT_MAX);
                mp->int_mask |= INT_RX_0 << i;
        }

        if (mp->oom) {
                mp->rx_oom.expires = jiffies + (HZ / 10);
                add_timer(&mp->rx_oom);
        }

        for (i = 0; i < mp->txq_count; i++) {
                err = txq_init(mp, i);
                if (err) {
                        while (--i >= 0)
                                txq_deinit(mp->txq + i);
                        goto out_free;
                }
                mp->int_mask |= INT_TX_END_0 << i;
        }

        add_timer(&mp->mib_counters_timer);
        port_start(mp);

        wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
        wrlp(mp, INT_MASK, mp->int_mask);

        return 0;

out_free:
        for (i = 0; i < mp->rxq_count; i++)
                rxq_deinit(mp->rxq + i);
out:
        free_irq(dev->irq, dev);

        return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
        unsigned int data;
        int i;

        for (i = 0; i < mp->rxq_count; i++)
                rxq_disable(mp->rxq + i);
        for (i = 0; i < mp->txq_count; i++)
                txq_disable(mp->txq + i);

        while (1) {
                u32 ps = rdlp(mp, PORT_STATUS);

                if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
                        break;
                udelay(10);
        }

        /* Reset the Enable bit in the Configuration Register */
        data = rdlp(mp, PORT_SERIAL_CONTROL);
        data &= ~(SERIAL_PORT_ENABLE |
                  DO_NOT_FORCE_LINK_FAIL |
                  FORCE_LINK_PASS);
        wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int i;

        wrlp(mp, INT_MASK_EXT, 0x00000000);
        wrlp(mp, INT_MASK, 0x00000000);
        rdlp(mp, INT_MASK);

        napi_disable(&mp->napi);

        del_timer_sync(&mp->rx_oom);

        netif_carrier_off(dev);

        free_irq(dev->irq, dev);

        port_reset(mp);
        mv643xx_eth_get_stats(dev);
        mib_counters_update(mp);
        del_timer_sync(&mp->mib_counters_timer);

        for (i = 0; i < mp->rxq_count; i++)
                rxq_deinit(mp->rxq + i);
        for (i = 0; i < mp->txq_count; i++)
                txq_deinit(mp->txq + i);

        return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);
        int ret;

        if (mp->phy == NULL)
                return -ENOTSUPP;

        ret = phy_mii_ioctl(mp->phy, ifr, cmd);
        if (!ret)
                mv643xx_adjust_pscr(mp);
        return ret;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (new_mtu < 64 || new_mtu > 9500)
                return -EINVAL;

        dev->mtu = new_mtu;
        mv643xx_eth_recalc_skb_size(mp);
        tx_set_rate(mp, 1000000000, 16777216);

        if (!netif_running(dev))
                return 0;

        /*
         * Stop and then re-open the interface. This will allocate RX
         * skbs of the new MTU.
         * There is a possible danger that the open will not succeed,
         * due to memory being full.
         */
        mv643xx_eth_stop(dev);
        if (mv643xx_eth_open(dev)) {
                netdev_err(dev,
                           "fatal error on re-opening device after MTU change\n");
        }

        return 0;
}
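
/*
 * The accepted MTU range is 64..9500 bytes, so jumbo frames can be
 * requested with a standard iproute2 command such as
 *
 *      ip link set dev eth0 mtu 9000
 *
 * (interface name illustrative).  On a running interface this takes
 * the stop/re-open path above to reallocate the RX buffers.
 */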

static void tx_timeout_task(struct work_struct *ugly)
{
        struct mv643xx_eth_private *mp;

        mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
        if (netif_running(mp->dev)) {
                netif_tx_stop_all_queues(mp->dev);
                port_reset(mp);
                port_start(mp);
                netif_tx_wake_all_queues(mp->dev);
        }
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        netdev_info(dev, "tx timeout\n");

        schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        wrlp(mp, INT_MASK, 0x00000000);
        rdlp(mp, INT_MASK);

        mv643xx_eth_irq(dev->irq, dev);

        wrlp(mp, INT_MASK, mp->int_mask);
}
#endif

/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
                              const struct mbus_dram_target_info *dram)
{
        void __iomem *base = msp->base;
        u32 win_enable;
        u32 win_protect;
        int i;

        for (i = 0; i < 6; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        win_enable = 0x3f;
        win_protect = 0;

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel((cs->base & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

                win_enable &= ~(1 << i);
                win_protect |= 3 << (2 * i);
        }

        writel(win_enable, base + WINDOW_BAR_ENABLE);
        msp->win_protect = win_protect;
}
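
/*
 * The window enable bits behave active-low here: win_enable starts at
 * 0x3f (all six windows disabled) and one bit is cleared per DRAM
 * chip select that gets mapped.  win_protect accumulates two
 * protection bits per window (0x3, which on this hardware appears to
 * grant full read/write access) and is later written into each port's
 * WINDOW_PROTECT register during probe.
 */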

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
        /*
         * Check whether we have a 14-bit coal limit field in bits
         * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
         * SDMA config register.
         */
        writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
        if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
                msp->extended_rx_coal_limit = 1;
        else
                msp->extended_rx_coal_limit = 0;

        /*
         * Check whether the MAC supports TX rate control, and if
         * yes, whether its associated registers are in the old or
         * the new place.
         */
        writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
        if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
                msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
        } else {
                writel(7, msp->base + 0x0400 + TX_BW_RATE);
                if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
                        msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
                else
                        msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
        }
}

#if defined(CONFIG_OF)
static const struct of_device_id mv643xx_eth_shared_ids[] = {
        { .compatible = "marvell,orion-eth", },
        { .compatible = "marvell,kirkwood-eth", },
        { }
};
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif

#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
#define mv643xx_eth_property(_np, _name, _v)                            \
        do {                                                            \
                u32 tmp;                                                \
                if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
                        _v = tmp;                                       \
        } while (0)
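
/*
 * The macro reads an optional "marvell,"-prefixed u32 property and
 * only assigns on success, so absent properties keep their defaults.
 * In a device tree, a port node might therefore carry entries such as
 * (values illustrative):
 *
 *      marvell,tx-queue-size = <512>;
 *      marvell,rx-queue-size = <512>;
 */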

static struct platform_device *port_platdev[3];

static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
                                          struct device_node *pnp)
{
        struct platform_device *ppdev;
        struct mv643xx_eth_platform_data ppd;
        struct resource res;
        const char *mac_addr;
        int ret;
        int dev_num = 0;

        memset(&ppd, 0, sizeof(ppd));
        ppd.shared = pdev;

        memset(&res, 0, sizeof(res));
        if (!of_irq_to_resource(pnp, 0, &res)) {
                dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
                return -EINVAL;
        }

        if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
                dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
                return -EINVAL;
        }

        if (ppd.port_number >= 3) {
                dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
                return -EINVAL;
        }

        while (dev_num < 3 && port_platdev[dev_num])
                dev_num++;

        if (dev_num == 3) {
                dev_err(&pdev->dev, "too many ports registered\n");
                return -EINVAL;
        }

        mac_addr = of_get_mac_address(pnp);
        if (mac_addr)
                memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);

        mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
        mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
        mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
        mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
        mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
        mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);

        ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
        if (!ppd.phy_node) {
                ppd.phy_addr = MV643XX_ETH_PHY_NONE;
                of_property_read_u32(pnp, "speed", &ppd.speed);
                of_property_read_u32(pnp, "duplex", &ppd.duplex);
        }

        ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
        if (!ppdev)
                return -ENOMEM;
        ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        ppdev->dev.of_node = pnp;

        ret = platform_device_add_resources(ppdev, &res, 1);
        if (ret)
                goto port_err;

        ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
        if (ret)
                goto port_err;

        ret = platform_device_add(ppdev);
        if (ret)
                goto port_err;

        port_platdev[dev_num] = ppdev;

        return 0;

port_err:
        platform_device_put(ppdev);
        return ret;
}

static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
        struct mv643xx_eth_shared_platform_data *pd;
        struct device_node *pnp, *np = pdev->dev.of_node;
        int ret;

        /* bail out if not registered from DT */
        if (!np)
                return 0;

        pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;
        pdev->dev.platform_data = pd;

        mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);

        for_each_available_child_of_node(np, pnp) {
                ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
                if (ret)
                        return ret;
        }
        return 0;
}

static void mv643xx_eth_shared_of_remove(void)
{
        int n;

        for (n = 0; n < 3; n++) {
                platform_device_del(port_platdev[n]);
                port_platdev[n] = NULL;
        }
}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
        return 0;
}

static inline void mv643xx_eth_shared_of_remove(void)
{
}
#endif

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
        static int mv643xx_eth_version_printed;
        struct mv643xx_eth_shared_platform_data *pd;
        struct mv643xx_eth_shared_private *msp;
        const struct mbus_dram_target_info *dram;
        struct resource *res;
        int ret;

        if (!mv643xx_eth_version_printed++)
                pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
                          mv643xx_eth_driver_version);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL)
                return -EINVAL;

        msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
        if (msp == NULL)
                return -ENOMEM;
        platform_set_drvdata(pdev, msp);

        msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (msp->base == NULL)
                return -ENOMEM;

        msp->clk = devm_clk_get(&pdev->dev, NULL);
        if (!IS_ERR(msp->clk))
                clk_prepare_enable(msp->clk);

        /*
         * (Re-)program MBUS remapping windows if we are asked to.
         */
        dram = mv_mbus_dram_info();
        if (dram)
                mv643xx_eth_conf_mbus_windows(msp, dram);

        ret = mv643xx_eth_shared_of_probe(pdev);
        if (ret)
                return ret;

        pd = dev_get_platdata(&pdev->dev);

        msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
                                        pd->tx_csum_limit : 9 * 1024;
        infer_hw_params(msp);

        return 0;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
        struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

        mv643xx_eth_shared_of_remove();
        if (!IS_ERR(msp->clk))
                clk_disable_unprepare(msp->clk);

        return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
        .probe          = mv643xx_eth_shared_probe,
        .remove         = mv643xx_eth_shared_remove,
        .driver = {
                .name   = MV643XX_ETH_SHARED_NAME,
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
        },
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
        int addr_shift = 5 * mp->port_num;
        u32 data;

        data = rdl(mp, PHY_ADDR);
        data &= ~(0x1f << addr_shift);
        data |= (phy_addr & 0x1f) << addr_shift;
        wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
        unsigned int data;

        data = rdl(mp, PHY_ADDR);

        return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
                       struct mv643xx_eth_platform_data *pd)
{
        struct net_device *dev = mp->dev;

        if (is_valid_ether_addr(pd->mac_addr))
                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
        else
                uc_addr_get(mp, dev->dev_addr);

        mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
        if (pd->rx_queue_size)
                mp->rx_ring_size = pd->rx_queue_size;
        mp->rx_desc_sram_addr = pd->rx_sram_addr;
        mp->rx_desc_sram_size = pd->rx_sram_size;

        mp->rxq_count = pd->rx_queue_count ? : 1;

        mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
        if (pd->tx_queue_size)
                mp->tx_ring_size = pd->tx_queue_size;
        mp->tx_desc_sram_addr = pd->tx_sram_addr;
        mp->tx_desc_sram_size = pd->tx_sram_size;

        mp->txq_count = pd->tx_queue_count ? : 1;
}

static void mv643xx_eth_adjust_link(struct net_device *dev)
{
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        mv643xx_adjust_pscr(mp);
}

static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
                                   int phy_addr)
{
        struct phy_device *phydev;
        int start;
        int num;
        int i;
        char phy_id[MII_BUS_ID_SIZE + 3];

        if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
                start = phy_addr_get(mp) & 0x1f;
                num = 32;
        } else {
                start = phy_addr & 0x1f;
                num = 1;
        }

        /* Attempt to connect to the PHY using orion-mdio */
        phydev = ERR_PTR(-ENODEV);
        for (i = 0; i < num; i++) {
                int addr = (start + i) & 0x1f;

                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         "orion-mdio-mii", addr);

                phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
                                     PHY_INTERFACE_MODE_GMII);
                if (!IS_ERR(phydev)) {
                        phy_addr_set(mp, addr);
                        break;
                }
        }

        return phydev;
}
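
/*
 * PHY_ID_FMT is "%s:%02x", so the ids tried above look like
 * "orion-mdio-mii:05".  With MV643XX_ETH_PHY_ADDR_DEFAULT the scan
 * starts at the address currently latched in the PHY_ADDR register
 * and wraps through all 32 MDIO addresses until phy_connect()
 * succeeds, recording the winning address back via phy_addr_set().
 */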

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
        struct phy_device *phy = mp->phy;

        phy_reset(mp);

        if (speed == 0) {
                phy->autoneg = AUTONEG_ENABLE;
                phy->speed = 0;
                phy->duplex = 0;
                phy->advertising = phy->supported | ADVERTISED_Autoneg;
        } else {
                phy->autoneg = AUTONEG_DISABLE;
                phy->advertising = 0;
                phy->speed = speed;
                phy->duplex = duplex;
        }
        phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
        u32 pscr;

        pscr = rdlp(mp, PORT_SERIAL_CONTROL);
        if (pscr & SERIAL_PORT_ENABLE) {
                pscr &= ~SERIAL_PORT_ENABLE;
                wrlp(mp, PORT_SERIAL_CONTROL, pscr);
        }

        pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
        if (mp->phy == NULL) {
                pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
                if (speed == SPEED_1000)
                        pscr |= SET_GMII_SPEED_TO_1000;
                else if (speed == SPEED_100)
                        pscr |= SET_MII_SPEED_TO_100;

                pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

                pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
                if (duplex == DUPLEX_FULL)
                        pscr |= SET_FULL_DUPLEX_MODE;
        }

        wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
        .ndo_open               = mv643xx_eth_open,
        .ndo_stop               = mv643xx_eth_stop,
        .ndo_start_xmit         = mv643xx_eth_xmit,
        .ndo_set_rx_mode        = mv643xx_eth_set_rx_mode,
        .ndo_set_mac_address    = mv643xx_eth_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = mv643xx_eth_ioctl,
        .ndo_change_mtu         = mv643xx_eth_change_mtu,
        .ndo_set_features       = mv643xx_eth_set_features,
        .ndo_tx_timeout         = mv643xx_eth_tx_timeout,
        .ndo_get_stats          = mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mv643xx_eth_netpoll,
#endif
};

static int mv643xx_eth_probe(struct platform_device *pdev)
{
        struct mv643xx_eth_platform_data *pd;
        struct mv643xx_eth_private *mp;
        struct net_device *dev;
        struct resource *res;
        int err;

        pd = dev_get_platdata(&pdev->dev);
        if (pd == NULL) {
                dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
                return -ENODEV;
        }

        if (pd->shared == NULL) {
                dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
                return -ENODEV;
        }

        dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
        if (!dev)
                return -ENOMEM;

        mp = netdev_priv(dev);
        platform_set_drvdata(pdev, mp);

        mp->shared = platform_get_drvdata(pd->shared);
        mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
        mp->port_num = pd->port_number;

        mp->dev = dev;

        /* Kirkwood resets some registers on gated clocks. Especially
         * CLK125_BYPASS_EN must be cleared but is not available on
         * all other SoCs/System Controllers using this driver.
         */
        if (of_device_is_compatible(pdev->dev.of_node,
                                    "marvell,kirkwood-eth-port"))
                wrlp(mp, PORT_SERIAL_CONTROL1,
                     rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);

        /*
         * Start with a default rate, and if there is a clock, allow
         * it to override the default.
         */
        mp->t_clk = 133000000;
        mp->clk = devm_clk_get(&pdev->dev, NULL);
        if (!IS_ERR(mp->clk)) {
                clk_prepare_enable(mp->clk);
                mp->t_clk = clk_get_rate(mp->clk);
        } else if (!IS_ERR(mp->shared->clk)) {
                mp->t_clk = clk_get_rate(mp->shared->clk);
        }

        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);

        err = 0;
        if (pd->phy_node) {
                mp->phy = of_phy_connect(mp->dev, pd->phy_node,
                                         mv643xx_eth_adjust_link, 0,
                                         PHY_INTERFACE_MODE_GMII);
                if (!mp->phy)
                        err = -ENODEV;
        } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
                mp->phy = phy_scan(mp, pd->phy_addr);

                if (IS_ERR(mp->phy))
                        err = PTR_ERR(mp->phy);
                else
                        phy_init(mp, pd->speed, pd->duplex);
        }
        if (err == -ENODEV) {
                err = -EPROBE_DEFER;
                goto out;
        }
        if (err)
                goto out;

        SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

        init_pscr(mp, pd->speed, pd->duplex);

        mib_counters_clear(mp);

        init_timer(&mp->mib_counters_timer);
        mp->mib_counters_timer.data = (unsigned long)mp;
        mp->mib_counters_timer.function = mib_counters_timer_wrapper;
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;

        spin_lock_init(&mp->mib_counters_lock);

        INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

        netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);

        init_timer(&mp->rx_oom);
        mp->rx_oom.data = (unsigned long)mp;
        mp->rx_oom.function = oom_timer_wrapper;

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        BUG_ON(!res);
        dev->irq = res->start;

        dev->netdev_ops = &mv643xx_eth_netdev_ops;

        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;

        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

        dev->priv_flags |= IFF_UNICAST_FLT;

        SET_NETDEV_DEV(dev, &pdev->dev);

        if (mp->shared->win_protect)
                wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

        netif_carrier_off(dev);

        wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

        set_rx_coal(mp, 250);
        set_tx_coal(mp, 0);

        err = register_netdev(dev);
        if (err)
                goto out;

        netdev_notice(dev, "port %d with MAC address %pM\n",
                      mp->port_num, dev->dev_addr);

        if (mp->tx_desc_sram_size > 0)
                netdev_notice(dev, "configured with sram\n");

        return 0;

out:
        if (!IS_ERR(mp->clk))
                clk_disable_unprepare(mp->clk);
        free_netdev(dev);

        return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
        struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

        unregister_netdev(mp->dev);
        if (mp->phy != NULL)
                phy_disconnect(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);

        if (!IS_ERR(mp->clk))
                clk_disable_unprepare(mp->clk);

        free_netdev(mp->dev);

        return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
        struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

        /* Mask all interrupts on ethernet port */
        wrlp(mp, INT_MASK, 0);
        rdlp(mp, INT_MASK);

        if (netif_running(mp->dev))
                port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
        .probe          = mv643xx_eth_probe,
        .remove         = mv643xx_eth_remove,
        .shutdown       = mv643xx_eth_shutdown,
        .driver = {
                .name   = MV643XX_ETH_NAME,
                .owner  = THIS_MODULE,
        },
};

static int __init mv643xx_eth_init_module(void)
{
        int rc;

        rc = platform_driver_register(&mv643xx_eth_shared_driver);
        if (!rc) {
                rc = platform_driver_register(&mv643xx_eth_driver);
                if (rc)
                        platform_driver_unregister(&mv643xx_eth_shared_driver);
        }

        return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
        platform_driver_unregister(&mv643xx_eth_driver);
        platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
              "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);