/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 *	Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define PORT_SERIAL_CONTROL		0x003c
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_RX				0x000003fc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)
#define TX_BURST_SIZE_16_64BIT		(4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		128
#define DEFAULT_TX_QUEUE_SIZE		256
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11
/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;
	u8 work_rx_oom;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};
/* port register accessors **************************************************/
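/*
 * Two register spaces are in play here: rdl()/wrl() access the shared
 * (per-controller) register window via mp->shared->base, while
 * rdlp()/wrlp() access this port's own register block via mp->base.
 */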
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);

	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}


/* rx napi ******************************************************************/
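/*
 * Reap completed RX descriptors: for each descriptor the SDMA engine
 * has handed back, unmap the buffer, fix up the byte count (the
 * hardware prepends 2 bytes of padding and appends a 4-byte CRC) and
 * pass the skb up the stack.  Runs from NAPI context, bounded by
 * 'budget'.
 */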
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);
		netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}
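/*
 * Refill empty RX descriptors with fresh buffers, preferring skbs from
 * the local recycle list over new allocations.  Buffer data pointers
 * are aligned to the DMA cache alignment before being handed to the
 * hardware.
 */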
static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;
		struct rx_desc *rx_desc;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size +
					    dma_get_cache_alignment() - 1);

		if (skb == NULL) {
			mp->work_rx_oom |= 1 << rxq->index;
			goto oom;
		}

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
						  mp->skb_size, DMA_FROM_DEVICE);
		rx_desc->buf_size = mp->skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
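/*
 * Fragments of 8 bytes or less that are not 8-byte aligned trip up the
 * TX SDMA engine; mv643xx_eth_xmit() linearizes skbs containing such
 * fragments before they are submitted.
 */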
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size,
					     DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
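/*
 * Fill in the first TX descriptor for an skb; fragments, if any, get
 * their own descriptors via txq_submit_frag_skb().  For
 * CHECKSUM_PARTIAL skbs the hardware generates the IPv4 and TCP/UDP
 * checksums, but only when the extra MAC header length (VLAN tags) is
 * 0, 4, 8 or 12 bytes; other layouts fall back to a software checksum
 * via skb_checksum_help().
 */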
static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}
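/*
 * ndo_start_xmit handler.  Linearization failures return
 * NETDEV_TX_BUSY; a full ring (which should not happen, since the
 * queue is stopped once fewer than MAX_SKB_FRAGS + 1 descriptors
 * remain) drops the packet instead of blocking.
 */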
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
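/*
 * Restart the TX DMA engine if it stopped prematurely: if the queue's
 * enable bit has cleared but the hardware descriptor pointer has not
 * yet reached the position we expect, there is still work queued, so
 * re-enable the queue.
 */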
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}
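/*
 * Reclaim completed TX descriptors: unmap the buffers, recycle skbs
 * onto the RX recycle list where skb_recycle_check() allows it, and
 * account errors.  With 'force' set (used when tearing the queue
 * down), descriptors still owned by the DMA engine are reclaimed too.
 */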
static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(NULL, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(NULL, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->default_rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size +
					dma_get_cache_alignment() - 1))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
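/*
 * token_rate works out to (rate * 64) / t_clk, clamped to 10 bits.  As
 * an illustrative example with a hypothetical t_clk of 133 MHz, a rate
 * of 100 Mbit/s gives ((100000000 / 1000) * 64) / (133000000 / 1000),
 * i.e. ~48, well under the 1023 limit.  The MTU and bucket size fields
 * are in units of 256 bytes, each clamped to its register width.
 */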
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}
static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val &= ~(1 << txq->index);
		wrlp(mp, off, val);

		/*
		 * Configure WRR weight for this queue (read-modify-write
		 * of the queue's own WRR config register, not 'off').
		 */
		val = rdlp(mp, TXQ_BW_WRR_CONF(txq->index));
		val = (val & ~0xff) | (weight & 0xff);
		wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
	}
}
/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}
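/*
 * Wait for an SMI transaction to complete.  Without an error interrupt
 * we poll the busy bit, sleeping 10 ms between polls for up to ~100 ms;
 * with the interrupt available we sleep on smi_busy_wait and are woken
 * by mv643xx_eth_err_irq() when the SMI-done cause bit fires.
 */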
static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}
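/*
 * The hardware MIB counters are (mostly) 32 bits wide and clear on
 * read, so they are folded into wider software counters here.  The
 * 30-second timer re-arm keeps the reads frequent enough that a
 * hardware counter cannot wrap unnoticed between updates.
 */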
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}
/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static int
mv643xx_eth_set_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return genphy_restart_aneg(mp->phy);
}

static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}
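/*
 * Compute the bitmask of last-octet nibbles to accept in the hardware
 * unicast filter.  Every secondary unicast address must match the
 * primary address in its first 5 bytes and in the top nibble of the
 * last byte; a return value of 0 means the filter cannot express the
 * address list and unicast promiscuous mode is used instead.
 */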
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct dev_addr_list *uc_ptr;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
		if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG);
	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		wrlp(mp, PORT_CONFIG, port_config);
		return;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrlp(mp, PORT_CONFIG, port_config);
}
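/*
 * 8-bit CRC (polynomial 0x107) over the 6-byte MAC address, used to
 * index the 256-entry "other" multicast hash table.
 */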
  1275. static int addr_crc(unsigned char *addr)
  1276. {
  1277. int crc = 0;
  1278. int i;
  1279. for (i = 0; i < 6; i++) {
  1280. int j;
  1281. crc = (crc ^ addr[i]) << 8;
  1282. for (j = 7; j >= 0; j--) {
  1283. if (crc & (0x100 << j))
  1284. crc ^= 0x107 << j;
  1285. }
  1286. }
  1287. return crc;
  1288. }
static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct dev_addr_list *addr;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;
		int i;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}

/* rx/tx queue initialisation ***********************************************/

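/*
 * Allocate one rx ring, preferring on-chip SRAM for queue 0 when the
 * descriptors fit, falling back to coherent DMA memory otherwise, and
 * chain the descriptors into a circular list.
 */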
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
					    mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
						       &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
			      GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					   nexti * sizeof(struct rx_desc);
	}

	return 0;

out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

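/*
 * Allocate one tx ring (again preferring SRAM for queue 0 when the
 * descriptors fit) and chain its descriptors into a circular list.
 */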
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
						       &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
				     nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}


/* netdev ops and related ***************************************************/

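/*
 * Read and acknowledge the interrupt cause registers, accumulating
 * per-queue work bits in mp->work_*.  Returns nonzero if any event
 * was pending.
 */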
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT)
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);

	int_cause &= INT_TX_END | INT_RX;
	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

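/*
 * Handle a link state change: on link down, reclaim all queued tx
 * descriptors and reset the hardware queue pointers; on link up,
 * log the negotiated speed, duplex and flow-control state.
 */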
static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

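/*
 * NAPI poll: repeatedly pick the highest-numbered queue with pending
 * work and service it in chunks of at most 16 descriptors, until the
 * budget is exhausted or no work remains, then re-enable interrupts.
 */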
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	mp->work_rx_refill |= mp->work_rx_oom;
	mp->work_rx_oom = 0;

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end |
				mp->work_rx | mp->work_rx_refill;
		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (mp->work_rx_refill & queue_mask) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->work_rx_oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
	}

	return work_done;
}

static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

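/* Set BMCR_RESET and wait for the PHY to clear it again. */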
static void phy_reset(struct mv643xx_eth_private *mp)
{
	int data;

	data = phy_read(mp->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(mp->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(mp->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}

static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	wrlp(mp, PORT_CONFIG, 0x02000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

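/*
 * Program the rx interrupt coalescing delay, given in usec.  The
 * register field counts units of 64 t_clk cycles; its width and
 * position within SDMA_CONFIG depend on the hardware variant.
 */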
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, (coal & 0x3fff) << 4);
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;
}

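/*
 * Bring the interface up: hook up the interrupt, allocate and fill
 * the rx/tx rings, start the port and unmask interrupts.
 */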
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	skb_queue_head_init(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
	}

	if (mp->work_rx_oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
	}

	netif_carrier_off(dev);

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);

	return 0;

out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

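/*
 * Quiesce the port: disable all rx/tx queues, wait for the tx FIFO
 * to drain, then drop the serial port enable and link-forcing bits.
 */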
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);

	return -EOPNOTSUPP;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
}
#endif

/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	/*
	 * Set up and register SMI bus.
	 */
	if (pd == NULL || pd->shared_smi == NULL) {
		msp->smi_bus = mdiobus_alloc();
		if (msp->smi_bus == NULL)
			goto out_unmap;

		msp->smi_bus->priv = msp;
		msp->smi_bus->name = "mv643xx_eth smi";
		msp->smi_bus->read = smi_bus_read;
		msp->smi_bus->write = smi_bus_write;
		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
		msp->smi_bus->parent = &pdev->dev;
		msp->smi_bus->phy_mask = 0xffffffff;
		if (mdiobus_register(msp->smi_bus) < 0)
			goto out_free_mii_bus;
		msp->smi = msp;
	} else {
		msp->smi = platform_get_drvdata(pd->shared_smi);
	}

	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);

	return 0;

out_free_mii_bus:
	mdiobus_free(msp->smi_bus);
out_unmap:
	iounmap(msp->base);
out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

	if (pd == NULL || pd->shared_smi == NULL) {
		mdiobus_unregister(msp->smi_bus);
		mdiobus_free(msp->smi_bus);
	}
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}

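/*
 * Locate the PHY for this port.  With MV643XX_ETH_PHY_ADDR_DEFAULT,
 * all 32 SMI addresses are scanned, starting from the address
 * currently latched in the PHY_ADDR register; otherwise only the
 * given address is probed.
 */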
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct mii_bus *bus = mp->shared->smi->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				phy_addr_set(mp, addr);
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

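/*
 * Set up PORT_SERIAL_CONTROL for the configured link parameters;
 * without a PHY, autonegotiation is disabled and the given speed
 * and duplex are forced.
 */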
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	set_params(mp, pd);
	dev->real_num_tx_queues = mp->txq_count;

	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
		mp->phy = phy_scan(mp, pd->phy_addr);

	if (mp->phy != NULL) {
		phy_init(mp, pd->speed, pd->duplex);
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
	} else {
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
	}

	init_pscr(mp, pd->speed, pd->duplex);

	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->get_stats = mv643xx_eth_get_stats;
	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_rx_mode = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
		   mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);