mv643xx_eth.c

/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 *	Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.1";

#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
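
/*
 * Note: each MBUS address window is programmed through a base/size
 * register pair spaced 8 bytes apart (the (w) << 3 stride above);
 * WINDOW_REMAP_HIGH and WINDOW_PROTECT use their own per-window
 * strides.
 */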

/*
 * Per-port registers.
 */
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
#define  INT_TX_END_0			0x00080000
#define  INT_TX_END			0x07f80000
#define  INT_RX				0x0007fbfc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x0000ffff
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define TXQ_FIX_PRIO_CONF_MOVED(p)	(0x04dc + ((p) << 10))
#define TX_BW_RATE_MOVED(p)		(0x04e0 + ((p) << 10))
#define TX_BW_MTU_MOVED(p)		(0x04e8 + ((p) << 10))
#define TX_BW_BURST_MOVED(p)		(0x04ec + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
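
/*
 * Each port's register block is 1KB wide, hence the (p) << 10 stride
 * in the macros above.  For example, for port 1, PORT_CONFIG(1)
 * evaluates to 0x0400 + (1 << 10) = 0x0800.
 */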

/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_16_64BIT		(4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_16_64BIT	|	\
		TX_BURST_SIZE_16_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_16_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_16_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
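
/*
 * Note: the BLM_*_NO_SWAP bits disable the SDMA engine's byte swapping
 * of buffer data, so little-endian hosts run with swapping turned off
 * while big-endian hosts keep the default (swapping) behaviour.
 */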

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800

/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
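
/*
 * The two struct variants above declare the 16-bit and 32-bit members
 * in opposite orders so that the in-memory byte layout the DMA engine
 * sees is the same regardless of host endianness.
 */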

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	spinlock_t phy_lock;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control_moved;
};


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct timer_list rx_oom;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;

	struct net_device *dev;

	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;

	spinlock_t lock;

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	u8 rxq_mask;
	int rxq_primary;
	struct napi_struct napi;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	u8 txq_mask;
	int txq_primary;
	struct tx_queue txq[8];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;
#endif
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}


/* rxq/txq helper functions *************************************************/
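/*
 * rxq_to_mp()/txq_to_mp() recover the owning port from a queue pointer:
 * the queues are arrays embedded in struct mv643xx_eth_private, so
 * container_of() on rxq[rxq->index] (the element the pointer is known
 * to address) yields the enclosing private struct.
 */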
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrl(mp, off, addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	/*
	 * netif_{stop,wake}_queue() flow control only applies to
	 * the primary queue.
	 */
	BUG_ON(txq->index != mp->txq_primary);

	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
}


/* rx ***********************************************************************/
static void txq_reclaim(struct tx_queue *txq, int force);
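
/*
 * rxq_refill() hands empty buffers back to the hardware: each freshly
 * allocated skb is cache-aligned and DMA-mapped, and the descriptor's
 * ownership bit is flipped to BUFFER_OWNED_BY_DMA only after a write
 * barrier, so the controller never sees a half-initialised descriptor.
 */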
static void rxq_refill(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL)
			break;

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (rxq->rx_desc_count != rxq->rx_ring_size) {
		rxq->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&rxq->rx_oom);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
}

static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	rxq_refill((struct rx_queue *)data);
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}

		mp->dev->last_rx = jiffies;
	}

	rxq_refill(rxq);

	return rx;
}

#ifdef MV643XX_ETH_NAPI
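/*
 * NAPI poll handler: the budget is shared across all active rx queues,
 * highest-numbered queue first, and the port interrupt is re-enabled
 * only when a poll round completes without exhausting the budget.
 */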
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int rx;
	int i;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

#ifdef MV643XX_ETH_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		mp->tx_clean_threshold = 0;
		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		if (netif_carrier_ok(mp->dev)) {
			spin_lock(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock(&mp->lock);
		}
	}
#endif

	rx = 0;
	for (i = 7; rx < budget && i >= 0; i--)
		if (mp->rxq_mask & (1 << i))
			rx += rxq_process(mp->rxq + i, budget - rx);

	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_CAUSE(mp->port_num), 0);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
	}

	return rx;
}
#endif


/* tx ***********************************************************************/
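/*
 * Fragments that are both smaller than 8 bytes and not 8-byte aligned
 * are reportedly mishandled by the hardware; skbs containing such
 * fragments are linearised (copied into one buffer) before transmit.
 */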
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static int txq_alloc_desc_index(struct tx_queue *txq)
{
	int tx_desc_curr;

	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);

	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;

	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);

	return tx_desc_curr;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size,
					     DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
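
/*
 * Note on checksum offload: for CHECKSUM_PARTIAL skbs the stack has
 * already stored the pseudo-header checksum in the TCP/UDP check
 * field, and txq_submit_skb() below passes it to the hardware via
 * l4i_chk so the engine can fold in the payload checksum.
 */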
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);

		length = skb_headlen(skb);
		txq->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END interrupt status */
	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
	rdl(mp, INT_CAUSE(mp->port_num));

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_queue *txq;
	unsigned long flags;

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	txq = mp->txq + mp->txq_primary;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
		spin_unlock_irqrestore(&mp->lock, flags);
		if (txq->index == mp->txq_primary && net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev,
				   "primary tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	txq_submit_skb(txq, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (txq->index == mp->txq_primary) {
		int entries_left;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_DESCS_PER_SKB)
			netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	if (mp->shared->tx_bw_control_moved) {
		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
	} else {
		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
	}
}
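
/*
 * Worked example with hypothetical numbers: for t_clk = 133 MHz and
 * rate = 100 Mbit/s, token_rate = ((100000000 / 1000) * 64) /
 * (133000000 / 1000) = 6400000 / 133000 = 48, well within the
 * 10-bit (1023) register limit.
 */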
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
			(bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val |= 1 << txq->index;
	wrl(mp, off, val);
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val &= ~(1 << txq->index);
	wrl(mp, off, val);

	/*
	 * Configure WRR weight for this queue.
	 */
	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);

	val = rdl(mp, off);
	val = (val & ~0xff) | (weight & 0xff);
	wrl(mp, off, val);
}


/* mii management interface *************************************************/
#define SMI_BUSY		0x10000000
#define SMI_READ_VALID		0x08000000
#define SMI_OPCODE_READ		0x04000000
#define SMI_OPCODE_WRITE	0x00000000
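
/*
 * SMI frame layout as encoded below: bits 25:21 carry the PHY register
 * number, bits 20:16 the PHY address, and (for writes) bits 15:0 the
 * data; SMI_BUSY and SMI_READ_VALID are status bits that are polled
 * before starting and after issuing a transaction.
 */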
static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

static void smi_reg_write(struct mv643xx_eth_private *mp,
			  unsigned int addr,
			  unsigned int reg, unsigned int value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}


/* mib counters *************************************************************/
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
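
/*
 * Each entry records the stat's location as an offset into either
 * struct net_device (netdev_off) or the driver-private struct (mp_off);
 * whichever offset is >= 0 is used, so one table can mix generic
 * netdev counters with hardware MIB counters.
 */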
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}

static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}

static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}

static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
{
	return 1;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link_phyless,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
}
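
/*
 * The filter tables pack four one-byte entries into each 32-bit
 * register, with bit 0 of an entry's byte acting as the "accept frame"
 * bit; set_filter_table_entry() below therefore masks the entry index
 * with 0xfc to locate the register and uses (entry & 3) to select the
 * byte lane.
 */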
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
				   int table, unsigned char entry)
{
	unsigned int table_reg;

	/* Set "accepts frame bit" at specified table entry */
	table_reg = rdl(mp, table + (entry & 0xfc));
	table_reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), table_reg);
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];

	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);

	table = UNICAST_TABLE(mp->port_num);
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);

	return 0;
}
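
/*
 * addr_crc() appears to implement a CRC-8 over the 6-byte MAC address
 * (polynomial x^8 + x^2 + x + 1, i.e. 0x107 with the implicit high
 * bit); the result indexes the 256-entry "other" multicast hash table.
 */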
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;

	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;

		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;

		if (addr->da_addrlen != 6)
			continue;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);

			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
}


/* rx/tx queue initialisation ***********************************************/
  1194. static int rxq_init(struct mv643xx_eth_private *mp, int index)
  1195. {
  1196. struct rx_queue *rxq = mp->rxq + index;
  1197. struct rx_desc *rx_desc;
  1198. int size;
  1199. int i;
  1200. rxq->index = index;
  1201. rxq->rx_ring_size = mp->default_rx_ring_size;
  1202. rxq->rx_desc_count = 0;
  1203. rxq->rx_curr_desc = 0;
  1204. rxq->rx_used_desc = 0;
  1205. size = rxq->rx_ring_size * sizeof(struct rx_desc);
  1206. if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
  1207. rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
  1208. mp->rx_desc_sram_size);
  1209. rxq->rx_desc_dma = mp->rx_desc_sram_addr;
  1210. } else {
  1211. rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
  1212. &rxq->rx_desc_dma,
  1213. GFP_KERNEL);
  1214. }
  1215. if (rxq->rx_desc_area == NULL) {
  1216. dev_printk(KERN_ERR, &mp->dev->dev,
  1217. "can't allocate rx ring (%d bytes)\n", size);
  1218. goto out;
  1219. }
  1220. memset(rxq->rx_desc_area, 0, size);
  1221. rxq->rx_desc_area_size = size;
  1222. rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
  1223. GFP_KERNEL);
  1224. if (rxq->rx_skb == NULL) {
  1225. dev_printk(KERN_ERR, &mp->dev->dev,
  1226. "can't allocate rx skb ring\n");
  1227. goto out_free;
  1228. }
  1229. rx_desc = (struct rx_desc *)rxq->rx_desc_area;
  1230. for (i = 0; i < rxq->rx_ring_size; i++) {
  1231. int nexti = (i + 1) % rxq->rx_ring_size;
  1232. rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
  1233. nexti * sizeof(struct rx_desc);
  1234. }
  1235. init_timer(&rxq->rx_oom);
  1236. rxq->rx_oom.data = (unsigned long)rxq;
  1237. rxq->rx_oom.function = rxq_refill_timer_wrapper;
  1238. return 0;
  1239. out_free:
  1240. if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
  1241. iounmap(rxq->rx_desc_area);
  1242. else
  1243. dma_free_coherent(NULL, size,
  1244. rxq->rx_desc_area,
  1245. rxq->rx_desc_dma);
  1246. out:
  1247. return -ENOMEM;
  1248. }
  1249. static void rxq_deinit(struct rx_queue *rxq)
  1250. {
  1251. struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
  1252. int i;
  1253. rxq_disable(rxq);
  1254. del_timer_sync(&rxq->rx_oom);
  1255. for (i = 0; i < rxq->rx_ring_size; i++) {
  1256. if (rxq->rx_skb[i]) {
  1257. dev_kfree_skb(rxq->rx_skb[i]);
  1258. rxq->rx_desc_count--;
  1259. }
  1260. }
  1261. if (rxq->rx_desc_count) {
  1262. dev_printk(KERN_ERR, &mp->dev->dev,
  1263. "error freeing rx ring -- %d skbs stuck\n",
  1264. rxq->rx_desc_count);
  1265. }
  1266. if (rxq->index == mp->rxq_primary &&
  1267. rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
  1268. iounmap(rxq->rx_desc_area);
  1269. else
  1270. dma_free_coherent(NULL, rxq->rx_desc_area_size,
  1271. rxq->rx_desc_area, rxq->rx_desc_dma);
  1272. kfree(rxq->rx_skb);
  1273. }
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
						       &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
			      GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti = (i + 1) % txq->tx_ring_size;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
				     nexti * sizeof(struct tx_desc);
	}

	return 0;


out_free:
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);

out:
	return -ENOMEM;
}
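/*
 * Walk the tx ring from tx_used_desc, retiring descriptors the
 * hardware has finished with -- or every descriptor when 'force' is
 * set, e.g. on queue teardown or link loss, in which case ownership
 * is stripped from descriptors still marked BUFFER_OWNED_BY_DMA.
 * mp->lock is dropped around the dma_unmap and kfree_skb calls and
 * re-taken afterwards, so the state captured from the descriptor is
 * copied to locals first.
 */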
static void txq_reclaim(struct tx_queue *txq, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/*
		 * Drop mp->lock while we free the skb.
		 */
		spin_unlock_irqrestore(&mp->lock, flags);

		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		spin_lock_irqsave(&mp->lock, flags);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}
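/*
 * Undo txq_init(): disable the queue, force-reclaim every in-flight
 * descriptor, then release the descriptor ring and the skb array.
 * The BUG_ON asserts that a forced reclaim really drained the ring.
 */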
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == mp->txq_primary &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
}
/* netdev ops and related ***************************************************/
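/*
 * Top-level interrupt handler.  Reads and acknowledges the port's
 * cause registers, then dispatches in turn: link state changes (on
 * link down, every enabled tx queue is force-reclaimed and its
 * hardware pointer reset), rx work (handed to NAPI or processed
 * inline), tx completion, and TxEnd events, where a queue is
 * re-enabled if the hardware's current descriptor pointer no longer
 * matches where the driver expects it to have stopped.
 */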
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
			(INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return IRQ_NONE;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
	}

	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
		if (rdl(mp, PORT_STATUS(mp->port_num)) & LINK_UP) {
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			int i;

			netif_stop_queue(dev);
			netif_carrier_off(dev);

			for (i = 0; i < 8; i++) {
				struct tx_queue *txq = mp->txq + i;

				if (mp->txq_mask & (1 << i)) {
					txq_reclaim(txq, 1);
					txq_reset_hw_ptr(txq);
				}
			}
		}
	}

	/*
	 * RxBuffer or RxError set for any of the 8 queues?
	 */
#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX) {
		int i;

		for (i = 7; i >= 0; i--)
			if (mp->rxq_mask & (1 << i))
				rxq_process(mp->rxq + i, INT_MAX);
	}
#endif

	/*
	 * TxBuffer or TxError set for any of the 8 queues?
	 */
	if (int_cause_ext & INT_EXT_TX) {
		int i;

		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		/*
		 * Enough space again in the primary TX queue for a
		 * full packet?
		 */
		if (netif_carrier_ok(dev)) {
			spin_lock(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock(&mp->lock);
		}
	}

	/*
	 * Any TxEnd interrupts?
	 */
	if (int_cause & INT_TX_END) {
		int i;

		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));

		spin_lock(&mp->lock);
		for (i = 0; i < 8; i++) {
			struct tx_queue *txq = mp->txq + i;
			u32 hw_desc_ptr;
			u32 expected_ptr;

			if ((int_cause & (INT_TX_END_0 << i)) == 0)
				continue;

			hw_desc_ptr =
				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
			expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

			if (hw_desc_ptr != expected_ptr)
				txq_enable(txq);
		}
		spin_unlock(&mp->lock);
	}

	return IRQ_HANDLED;
}
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	data |= BMCR_RESET;
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);

	do {
		udelay(1);
		smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	} while (data & BMCR_RESET);
}
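/*
 * Bring the port fully up: re-apply the PHY settings across a PHY
 * reset, enable the serial port, program SDMA defaults, set up tx
 * rate limiting and per-queue fixed priorities (the magic numbers
 * appear to be a 1 Gbit/s rate cap with a 16 MB maximum burst),
 * install the unicast address, and finally point each enabled rx
 * queue's hardware descriptor pointer at its current descriptor and
 * enable it.
 */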
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy_addr != -1) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy_addr == -1)
		pscr |= FORCE_LINK_PASS;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < 8; i++) {
		struct tx_queue *txq = mp->txq + i;

		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < 8; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
		u32 addr;

		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}
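/*
 * Program the rx interrupt coalescing delay.  'delay' appears to be
 * in usec: (t_clk / 1000000) * delay yields t_clk cycles, and the
 * hardware field counts in units of 64 cycles.  E.g. with the default
 * 133 MHz t_clk, a 100 usec delay gives (133 * 100) / 64 = 207.
 * Newer silicon has a wider limit field in different SDMA_CONFIG bit
 * positions, hence the extended_rx_coal_limit branch (see
 * infer_hw_params() below).
 */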
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdl(mp, SDMA_CONFIG(mp->port_num));
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrl(mp, SDMA_CONFIG(mp->port_num), val);
}
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
}
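/*
 * Called via dev->open: clear stale interrupt causes, grab the
 * (shared) irq, initialise the MAC filter tables, set up and refill
 * every enabled rx queue, set up every enabled tx queue, start the
 * port, and only then unmask interrupts.  The error paths unwind in
 * reverse order.
 */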
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	init_mac_tables(mp);

	for (i = 0; i < 8; i++) {
		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->rxq_mask & (1 << i))
					rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i);
	}

	for (i = 0; i < 8; i++) {
		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->txq_mask & (1 << i))
					txq_deinit(mp->txq + i);
			goto out_free;
		}
	}

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);

	return 0;


out_free:
	for (i = 0; i < 8; i++)
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}
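/*
 * Quiesce the port: disable all enabled rx/tx queues, busy-wait until
 * the port reports the tx FIFO empty with no transmission in progress,
 * then clear the enable and forced-link bits in the serial control
 * register.
 */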
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_disable(mp->rxq + i);
		if (mp->txq_mask & (1 << i))
			txq_disable(mp->txq + i);
	}
	while (1) {
		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
}
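/*
 * Reverse of mv643xx_eth_open(): mask interrupts, stop NAPI and the
 * tx queue, free the irq, reset the port, take a final snapshot of
 * the MIB counters, and tear down every enabled rx/tx queue.
 */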
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mib_counters_update(mp);

	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
		if (mp->txq_mask & (1 << i))
			txq_deinit(mp->txq + i);
	}

	return 0;
}
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy_addr != -1)
		return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);

	return -EOPNOTSUPP;
}
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  There is a possible danger that the
	 * open will not succeed, due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}
static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);

		port_reset(mp);
		port_start(mp);

		__txq_maybe_wake(mp->txq + mp->txq_primary);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

	mv643xx_eth_irq(dev->irq, dev);

	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}
#endif
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int val;

	smi_reg_read(mp, addr, reg, &val);

	return val;
}

static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	smi_reg_write(mp, addr, reg, val);
}
/* platform glue ************************************************************/
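/*
 * Program the controller's six address decoding windows to match the
 * MBUS DRAM layout handed in by the platform: clear all windows, then
 * open one per DRAM chip select, knocking each opened window's bit
 * out of the bar disable mask and accumulating the per-window access
 * protection bits for later use by WINDOW_PROTECT.
 */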
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + SDMA_CONFIG(0));
	if (readl(msp->base + SDMA_CONFIG(0)) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the TX rate control registers are in the
	 * old or the new place.
	 */
	writel(1, msp->base + TX_BW_MTU_MOVED(0));
	if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1)
		msp->tx_bw_control_moved = 1;
	else
		msp->tx_bw_control_moved = 0;
}
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}
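/*
 * Distil the platform data into mp: MAC address (from platform data
 * if valid, otherwise read back from the hardware), SMI/PHY wiring,
 * ring sizes, SRAM placement, and the rx/tx queue masks.  The
 * highest-numbered enabled queue in each mask becomes the primary
 * queue (fls(mask) - 1).
 */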
static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	if (pd->rx_queue_mask)
		mp->rxq_mask = pd->rx_queue_mask;
	else
		mp->rxq_mask = 0x01;
	mp->rxq_primary = fls(mp->rxq_mask) - 1;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	if (pd->tx_queue_mask)
		mp->txq_mask = pd->tx_queue_mask;
	else
		mp->txq_mask = 0x01;
	mp->txq_primary = fls(mp->txq_mask) - 1;
}
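/*
 * A sketch (hypothetical board code, not from this driver) of the
 * platform data consumed above -- enable rx queue 0 and tx queues 0
 * and 1, making tx queue 1 the primary:
 *
 *	static struct mv643xx_eth_platform_data eth_pd = {
 *		.shared		= &mv643xx_eth_shared_device,
 *		.port_number	= 0,
 *		.phy_addr	= 8,
 *		.rx_queue_mask	= 0x01,
 *		.tx_queue_mask	= 0x03,
 *	};
 */

/*
 * Probe for a PHY at mp->phy_addr by flipping the BMCR auto-
 * negotiation enable bit and reading it back: if the toggle doesn't
 * stick, nothing writable is listening at that address.  The original
 * BMCR value is restored on success.
 */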
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	unsigned int data2;

	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);

	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
	if (((data ^ data2) & BMCR_ANENABLE) == 0)
		return -ENODEV;

	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);

	return 0;
}
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
{
	struct ethtool_cmd cmd;
	int err;

	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;

	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);

	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half |
				  ADVERTISED_10baseT_Full |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd.advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
	}

	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
}
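/*
 * Build a fresh port serial control value: disable the port first if
 * it was left enabled, then, for PHY-less ports (phy_addr == -1),
 * force the given speed and duplex with auto-negotiation disabled.
 */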
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy_addr == -1) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
}
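/*
 * Per-port probe: validate the platform data, allocate the
 * net_device, wire mp up against the controller-level shared state,
 * initialise the PHY if one is present, populate the net_device
 * callbacks and features, and register the interface.
 */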
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	if (mp->phy_addr != -1) {
		err = phy_init(mp, pd);
		if (err)
			goto out;

		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
	} else {
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
	}

	init_pscr(mp, pd->speed, pd->duplex);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	/*
	 * Zero copy can only work if we use Discovery II memory.  Else,
	 * we will have to map the buffers to ISA memory which is only
	 * 16 MB.
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));

	if (dev->features & NETIF_F_SG)
		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");

	if (dev->features & NETIF_F_IP_CSUM)
		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");

#ifdef MV643XX_ETH_NAPI
	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
#endif

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);