mv643xx_eth.c

/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

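/*
 * Worked example (illustrative; the actual values are architecture
 * dependent): with PAGE_SIZE = 4096, NET_SKB_PAD = 32 and
 * SMP_CACHE_BYTES = 64, SKB_DMA_REALIGN = (4096 - 32) % 64 = 32, so
 * rxq_refill() below reserves an extra 32 bytes of headroom to keep the
 * RX buffer cache-line aligned for DMA.  With SMP_CACHE_BYTES = 32 the
 * expression evaluates to 0 and no extra headroom is reserved.
 */
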
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11

/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct lro_counters {
	u32 lro_aggregated;
	u32 lro_flushed;
	u32 lro_no_desc;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[8];
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct lro_counters lro_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};

/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}

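/*
 * Illustrative sketch (not part of the original driver): rdl()/wrl()
 * address the register block shared by all ports, while rdlp()/wrlp()
 * address the per-port block.  For example, testing the link bit of the
 * per-port status register:
 */
static inline int mv643xx_eth_example_link_up(struct mv643xx_eth_private *mp)
{
	return !!(rdlp(mp, PORT_STATUS) & LINK_UP);
}
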
/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

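/*
 * Note (illustrative): container_of() recovers the enclosing private
 * struct from a pointer to one of its embedded queues.  For
 * rxq == &mp->rxq[n], subtracting offsetof(struct mv643xx_eth_private,
 * rxq[n]) from rxq yields mp again, so the queue structures need not
 * carry a back-pointer of their own.
 */
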
static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);

	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

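/*
 * Illustrative note: RXQ_COMMAND and TXQ_COMMAND pack per-queue enable
 * bits in [7:0] and disable bits in [15:8].  Writing 1 << q starts
 * queue q; writing 1 << (q + 8) requests a stop, and the enable bit
 * reads back as set until the hardware has actually stopped the queue,
 * which is why rxq_disable() and txq_disable() poll before returning.
 */
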
static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}


/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
		       u64 *hdr_flags, void *priv)
{
	unsigned long cmd_sts = (unsigned long)priv;

	/*
	 * Make sure that this packet is Ethernet II, is not VLAN
	 * tagged, is IPv4, has a valid IP header, and is TCP.
	 */
	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
		       RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
		       RX_PKT_IS_VLAN_TAGGED)) !=
	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
		return -1;

	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int lro_flush_needed;
	int rx;

	lro_flush_needed = 0;
	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		if (skb->dev->features & NETIF_F_LRO &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
			lro_flush_needed = 1;
		} else
			netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (lro_flush_needed)
		lro_flush_all(&rxq->lro_mgr);

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

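/*
 * Worked example (illustrative): for a minimum-size 64 byte Ethernet
 * frame (including its 4 byte CRC), the descriptor byte count is
 * 2 + 64 = 66: rx_bytes is incremented by 66 - 2 = 64, while skb_put()
 * above adds 66 - 2 - 4 = 60 bytes, i.e. the frame without the
 * hardware-inserted padding and without the CRC.
 */
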
static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb->end - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}

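/*
 * Illustrative note on the two wmb() barriers above: the first orders
 * the buf_ptr/buf_size writes before the cmd_sts write that transfers
 * ownership of the descriptor to the hardware; the second makes the
 * ownership transfer itself visible before the CPU moves on.  Without
 * the first barrier, the DMA engine could observe BUFFER_OWNED_BY_DMA
 * while still seeing a stale buffer pointer.
 */
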
/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

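/*
 * Illustrative example: a 6 byte fragment starting at page offset
 * 0x1002 satisfies both conditions (size <= 8 and offset not 8-byte
 * aligned); such skbs are linearized in mv643xx_eth_xmit() before
 * submission, presumably to sidestep an SDMA limitation with tiny
 * unaligned buffers.
 */
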
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 this_frag, 0,
						 skb_frag_size(this_frag),
						 DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;
		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

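/*
 * Worked example (illustrative): for a packet carrying a single 802.1Q
 * tag, ip_hdr(skb) starts 18 bytes into the frame, so hdr_len = 18 and
 * tag_bytes = 18 - ETH_HLEN = 4, which sets MAC_HDR_EXTRA_4_BYTES.
 * Only tag_bytes values of 0, 4, 8 or 12 pass the (tag_bytes & ~12)
 * test; anything else falls back to skb_checksum_help() in software.
 */
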
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	length = skb->len;

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += length;
		txq->tx_packets++;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}

/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}

/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

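/*
 * Worked example (illustrative, assuming t_clk = 133 MHz): for
 * rate = 100 Mbit/s, token_rate = (100000 * 64) / 133000 = 48 tokens
 * per 64 t_clk cycles, and a 32 KB burst gives a bucket_size of
 * (32768 + 255) >> 8 = 129 units of 256 bytes.  Both values are well
 * under their 1023 and 65535 hardware limits.
 */
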
static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}

/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		pr_warn("SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

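/*
 * Illustrative example: an SMI transaction packs the register number
 * into bits [25:21] and the PHY address into bits [20:16] of SMI_REG.
 * Reading the PHY status register (register 1) of a PHY at address 8
 * would therefore issue:
 *
 *	writel(SMI_OPCODE_READ | (1 << 21) | (8 << 16), smi_reg);
 *
 * and, once SMI_READ_VALID is set, the 16-bit result is in the low
 * half of SMI_REG.
 */
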
/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
{
	u32 lro_aggregated = 0;
	u32 lro_flushed = 0;
	u32 lro_no_desc = 0;
	int i;

	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;

		lro_aggregated += rxq->lro_mgr.stats.aggregated;
		lro_flushed += rxq->lro_mgr.stats.flushed;
		lro_no_desc += rxq->lro_mgr.stats.no_desc;
	}

	mp->lro_counters.lro_aggregated = lro_aggregated;
	mp->lro_counters.lro_flushed = lro_flushed;
	mp->lro_counters.lro_no_desc = lro_no_desc;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

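/*
 * Illustrative note: the hardware MIB counters appear to be
 * clear-on-read, so mib_counters_clear() zeroes the whole 0x00-0x7c
 * counter block simply by reading every register once and discarding
 * the values, while mib_counters_update() below accumulates each read
 * into the software copies.
 */
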
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}

/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}

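/*
 * Worked example (illustrative, assuming t_clk = 133 MHz): requesting a
 * 250 usec delay gives
 *
 *	register_value = (250 * 133000000 + 31999999) / 64000000 = 520
 *
 * where the +31999999 implements round-to-nearest; reading it back,
 * 520 * 64000000 / 133000000 evaluates to ~250 usec again.
 */
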
/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }

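/*
 * Illustrative example: MIBSTAT(good_octets_received) expands to
 *
 *	{ "good_octets_received", 8, -1,
 *	  offsetof(struct mv643xx_eth_private,
 *		   mib_counters.good_octets_received) }
 *
 * i.e. a 64-bit counter fetched via its offset into the private struct;
 * mv643xx_eth_get_ethtool_stats() below picks the netdev_off or mp_off
 * base depending on which offset is non-negative.
 */
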
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	LROSTAT(lro_aggregated),
	LROSTAT(lro_flushed),
	LROSTAT(lro_no_desc),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
				 struct ethtool_cmd *cmd)
{
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		ethtool_cmd_speed_set(cmd, SPEED_10);
		break;
	case PORT_SPEED_100:
		ethtool_cmd_speed_set(cmd, SPEED_100);
		break;
	case PORT_SPEED_1000:
		ethtool_cmd_speed_set(cmd, SPEED_1000);
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

  1223. static int
  1224. mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1225. {
  1226. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1227. if (mp->phy != NULL)
  1228. return mv643xx_eth_get_settings_phy(mp, cmd);
  1229. else
  1230. return mv643xx_eth_get_settings_phyless(mp, cmd);
  1231. }
  1232. static int
  1233. mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1234. {
  1235. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1236. if (mp->phy == NULL)
  1237. return -EINVAL;
  1238. /*
  1239. * The MAC does not support 1000baseT_Half.
  1240. */
  1241. cmd->advertising &= ~ADVERTISED_1000baseT_Half;
  1242. return phy_ethtool_sset(mp->phy, cmd);
  1243. }
  1244. static void mv643xx_eth_get_drvinfo(struct net_device *dev,
  1245. struct ethtool_drvinfo *drvinfo)
  1246. {
  1247. strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
  1248. strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
  1249. strncpy(drvinfo->fw_version, "N/A", 32);
  1250. strncpy(drvinfo->bus_info, "platform", 32);
  1251. drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
  1252. }
  1253. static int mv643xx_eth_nway_reset(struct net_device *dev)
  1254. {
  1255. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1256. if (mp->phy == NULL)
  1257. return -EINVAL;
  1258. return genphy_restart_aneg(mp->phy);
  1259. }
  1260. static int
  1261. mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
  1262. {
  1263. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1264. ec->rx_coalesce_usecs = get_rx_coal(mp);
  1265. ec->tx_coalesce_usecs = get_tx_coal(mp);
  1266. return 0;
  1267. }
  1268. static int
  1269. mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
  1270. {
  1271. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1272. set_rx_coal(mp, ec->rx_coalesce_usecs);
  1273. set_tx_coal(mp, ec->tx_coalesce_usecs);
  1274. return 0;
  1275. }
static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	er->rx_max_pending = 4096;
	er->tx_max_pending = 4096;

	er->rx_pending = mp->rx_ring_size;
	er->tx_pending = mp->tx_ring_size;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
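
/*
 * NETIF_F_RXCSUM is implemented with a single PORT_CONFIG write.
 * Going by the comment in port_start(), which relies on this call to
 * program PORT_CONFIG, the 0x02000000 bit appears to make the
 * hardware include the pseudo-header in its receive checksum
 * calculation; the remaining bits are left at zero, which routes
 * unmatched frames to RX queue #0.
 */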
static int
mv643xx_eth_set_features(struct net_device *dev, u32 features)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 rx_csum = features & NETIF_F_RXCSUM;

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       mv643xx_eth_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	mv643xx_eth_grab_lro_stats(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
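/*
 * The hardware stores the station address across two registers:
 * MAC_ADDR_HIGH holds the first four octets (addr[0] in bits 31:24)
 * and MAC_ADDR_LOW the last two.  For 00:50:43:12:34:56 this works
 * out to MAC_ADDR_HIGH = 0x00504312 and MAC_ADDR_LOW = 0x3456.
 */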
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}
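
/*
 * Compute the set of low-nibble values that the 16-entry unicast
 * filter table has to accept: every secondary address must match the
 * primary address in its first 44 bits, so only the low four bits of
 * the last octet may vary.  A return value of 0 means the table
 * cannot express the address set and the caller falls back to
 * unicast promiscuous mode.  For example, a primary address ending
 * in 0x05 plus a secondary address ending in 0x0a yields
 * (1 << 0x5) | (1 << 0xa) = 0x0420.
 */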
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	netdev_for_each_uc_addr(ha, dev) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}
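
/*
 * Fan the 16 nibble bits out into the port's unicast table.  Each
 * table entry is one byte wide, packed four entries per 32-bit
 * register, so four registers are written with 0x01 in every byte
 * lane whose nibble value is to be accepted.
 */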
static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}
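
/*
 * CRC-8 over the six octets of an ethernet address, using the
 * polynomial x^8 + x^2 + x + 1 (0x107 including the implicit high
 * bit).  The inner loop clears bits 15..8 while reducing, so the
 * result always fits in eight bits and can be used directly as an
 * index into the 256-entry "other" multicast table.
 */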
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
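
/*
 * The port has two 256-entry multicast filter tables: a "special"
 * table indexed directly by the last octet of addresses of the form
 * 01:00:5e:00:00:xx, and an "other" table indexed by addr_crc().
 * In promiscuous/allmulti mode -- or when the kmalloc() below fails
 * -- both tables are filled with 0x01010101, accepting everything.
 */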
static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	netdev_for_each_mc_addr(ha, dev) {
		u8 *a = ha->addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}


/* rx/tx queue initialisation ***********************************************/
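/*
 * Descriptor rings normally live in coherent DMA memory, but when
 * the platform provides on-chip SRAM (rx/tx_desc_sram_addr), queue
 * 0's ring is placed there instead whenever it fits.
 */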
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
					    mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
			      GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		netdev_err(mp->dev, "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	rxq->lro_mgr.dev = mp->dev;
	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
	rxq->lro_mgr.features = LRO_F_NAPI;
	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
	rxq->lro_mgr.max_aggr = 32;
	rxq->lro_mgr.frag_align_pad = 0;
	rxq->lro_mgr.lro_arr = rxq->lro_arr;
	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;

	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}


/* netdev ops and related ***************************************************/
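/*
 * Read and acknowledge the interrupt cause registers, translating
 * the hardware cause bits into the mp->work_* masks that the NAPI
 * poll loop consumes.  Returns nonzero if any work was found.
 */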
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause &= ~INT_EXT;
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
	}

	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}
static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			netdev_info(dev, "link down\n");

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}
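
/*
 * NAPI poll loop.  Each iteration services a single work source,
 * always picking the highest-numbered queue that has work pending
 * (via fls() below) and capping each slice at 16 units so that no
 * single queue can monopolise the budget.
 */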
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	if (unlikely(mp->oom)) {
		mp->oom = 0;
		del_timer(&mp->rx_oom);
	}

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			work_done++;
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
		if (likely(!mp->oom))
			queue_mask |= mp->work_rx_refill;

		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, mp->int_mask);
	}

	return work_done;
}

static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

static void phy_reset(struct mv643xx_eth_private *mp)
{
	int data;

	data = phy_read(mp->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(mp->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(mp->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	mv643xx_eth_set_features(mp->dev, mp->dev->features);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	mp->skb_size += SKB_DMA_REALIGN;
}
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		netdev_err(dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	skb_queue_head_init(&mp->rx_recycle);

	mp->int_mask = INT_EXT;

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
		mp->int_mask |= INT_RX_0 << i;
	}

	if (mp->oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
		mp->int_mask |= INT_TX_END_0 << i;
	}

	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, mp->int_mask);

	return 0;


out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, ifr, cmd);

	return -EOPNOTSUPP;
}
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface, so that RX skbs of the
	 * new MTU are allocated.  Note that the re-open can fail if
	 * the system is out of memory.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	}

	return 0;
}
static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif


/* platform glue ************************************************************/
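/*
 * Program the six MBUS address decoding windows to match the DRAM
 * chip selects described by the platform data.  All windows are
 * cleared first; win_enable starts out as 0x3f and gets a bit
 * cleared for every window that is put to use before being written
 * to WINDOW_BAR_ENABLE.
 */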
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kzalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;

	msp->base = ioremap(res->start, resource_size(res));
	if (msp->base == NULL)
		goto out_free;

	/*
	 * Set up and register SMI bus.
	 */
	if (pd == NULL || pd->shared_smi == NULL) {
		msp->smi_bus = mdiobus_alloc();
		if (msp->smi_bus == NULL)
			goto out_unmap;

		msp->smi_bus->priv = msp;
		msp->smi_bus->name = "mv643xx_eth smi";
		msp->smi_bus->read = smi_bus_read;
		msp->smi_bus->write = smi_bus_write;
		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
		msp->smi_bus->parent = &pdev->dev;
		msp->smi_bus->phy_mask = 0xffffffff;
		if (mdiobus_register(msp->smi_bus) < 0)
			goto out_free_mii_bus;
		msp->smi = msp;
	} else {
		msp->smi = platform_get_drvdata(pd->shared_smi);
	}

	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
					pd->tx_csum_limit : 9 * 1024;
	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);

	return 0;

out_free_mii_bus:
	mdiobus_free(msp->smi_bus);
out_unmap:
	iounmap(msp->base);
out_free:
	kfree(msp);
out:
	return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

	if (pd == NULL || pd->shared_smi == NULL) {
		mdiobus_unregister(msp->smi_bus);
		mdiobus_free(msp->smi_bus);
	}
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};
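
/*
 * The shared PHY_ADDR register packs one 5-bit SMI address per
 * port, with port N occupying bits [5N+4:5N].
 */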
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}
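
/*
 * Locate the PHY for this port.  With MV643XX_ETH_PHY_ADDR_DEFAULT
 * all 32 SMI addresses are scanned, starting from whatever address
 * the PHY_ADDR register currently holds; otherwise only the given
 * address is probed.  The first PHY found wins, and its address is
 * written back through phy_addr_set().
 */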
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct mii_bus *bus = mp->shared->smi->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				phy_addr_set(mp, addr);
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_set_features	= mv643xx_eth_set_features,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};

static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	set_params(mp, pd);
	netif_set_real_num_tx_queues(dev, mp->txq_count);
	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
		mp->phy = phy_scan(mp, pd->phy_addr);

	if (mp->phy != NULL)
		phy_init(mp, pd->speed, pd->duplex);

	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

	init_pscr(mp, pd->speed, pd->duplex);

	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_RXCSUM | NETIF_F_LRO;
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

	dev->priv_flags |= IFF_UNICAST_FLT;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	netdev_notice(dev, "port %d with MAC address %pM\n",
		      mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		netdev_notice(dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	cancel_work_sync(&mp->tx_timeout_task);
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);